import json
import pytest

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
from ceph.utils import datetime_to_str, datetime_now
from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
from cephadm.migrations import LAST_MIGRATION
from cephadm.tests.fixtures import _run_cephadm, wait, with_host, receive_agent_metadata_all_hosts
from cephadm.serve import CephadmServe
from tests import mock

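# Tests for the cephadm spec-store migrations: each test seeds the config-key
# store with a legacy spec (or a stale migration_current value), runs
# cephadm_module.migration.migrate() and checks the converted result.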
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            assert cephadm_module.migration_current == 0

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            receive_agent_metadata_all_hosts(cephadm_module)
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()

            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry for this hack, but I need to make sure Migration thinks
            # we have already updated all daemons.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current >= 2

            out = [o.spec.placement for o in wait(
                cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(
                hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]

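# The next few tests cover the service_id cleanup migration: legacy specs that
# were stored with a bogus service_id (e.g. 'mon.wrong') are collapsed into a
# single spec keyed by the service name and left unmanaged.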
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current >= 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1'])
        )

@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({
            'spec': {
                'service_type': 'mon',
                'placement': {
                    'count': 5,
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 2
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.all_specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current >= 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5)
        )

@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({
            'spec': {
                'service_type': 'mds',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.all_specs) == 0

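# The NFS migration tests below check that loading a legacy nfs spec queues the
# (service_id, pool, namespace) tuple in 'nfs_migration_queue' and that running
# the migrations advances migration_current up to LAST_MIGRATION.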
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION

@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'ganesha-foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['ganesha-foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION

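# Starting from migration_current = 3, running the migrations is expected to
# register a 'client.admin' client keyring entry targeting the '_admin' label.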
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
    assert 'client.admin' not in cephadm_module.keys.keys

    cephadm_module.migration_current = 3
    cephadm_module.migration.migrate()
    assert cephadm_module.migration_current == LAST_MIGRATION

    assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'

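# set_sane_migration_current() and is_migration_ongoing() are expected to reset
# an unset or out-of-range migration_current: None becomes LAST_MIGRATION and
# anything beyond LAST_MIGRATION is reset to 0.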
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_set_sane_value(cephadm_module: CephadmOrchestrator):
    cephadm_module.migration_current = 0
    cephadm_module.migration.set_sane_migration_current()
    assert cephadm_module.migration_current == 0

    cephadm_module.migration_current = LAST_MIGRATION
    cephadm_module.migration.set_sane_migration_current()
    assert cephadm_module.migration_current == LAST_MIGRATION

    cephadm_module.migration_current = None
    cephadm_module.migration.set_sane_migration_current()
    assert cephadm_module.migration_current == LAST_MIGRATION

    cephadm_module.migration_current = LAST_MIGRATION + 1
    cephadm_module.migration.set_sane_migration_current()
    assert cephadm_module.migration_current == 0

    cephadm_module.migration_current = None
    ongoing = cephadm_module.migration.is_migration_ongoing()
    assert not ongoing
    assert cephadm_module.migration_current == LAST_MIGRATION

    cephadm_module.migration_current = LAST_MIGRATION + 1
    ongoing = cephadm_module.migration.is_migration_ongoing()
    assert ongoing
    assert cephadm_module.migration_current == 0

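# The RGW spec migration splits extra 'param=value' tokens out of
# rgw_frontend_type ('beast tcp_nodelay=1 ...') into the separate
# 'rgw_frontend_extra_args' list; specs without embedded extra args
# are left untouched.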
@pytest.mark.parametrize(
    "rgw_spec_store_entry, should_migrate",
    [
        ({
            'spec': {
                'service_type': 'rgw',
                'service_name': 'rgw.foo',
                'service_id': 'foo',
                'placement': {
                    'hosts': ['host1']
                },
                'spec': {
                    'rgw_frontend_type': 'beast tcp_nodelay=1 request_timeout_ms=65000 rgw_thread_pool_size=512',
                    'rgw_frontend_port': '5000',
                },
            },
            'created': datetime_to_str(datetime_now()),
        }, True),
        ({
            'spec': {
                'service_type': 'rgw',
                'service_name': 'rgw.foo',
                'service_id': 'foo',
                'placement': {
                    'hosts': ['host1']
                },
            },
            'created': datetime_to_str(datetime_now()),
        }, False),
    ]
)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_rgw_spec(cephadm_module: CephadmOrchestrator, rgw_spec_store_entry, should_migrate):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'rgw',
            json.dumps(rgw_spec_store_entry, sort_keys=True),
        )

        # make sure rgw_migration_queue is populated accordingly
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()
        ls = json.loads(cephadm_module.get_store('rgw_migration_queue'))
        assert 'rgw' == ls[0]['spec']['service_type']

        # shortcut rgw_migration_queue loading by directly assigning
        # ls output to rgw_migration_queue list
        cephadm_module.migration.rgw_migration_queue = ls

        # skip other migrations and go directly to 5_6 migration (RGW spec)
        cephadm_module.migration_current = 5
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION

        if should_migrate:
            # make sure the spec has been migrated and the param=value entries
            # that were part of the rgw_frontend_type are now in the new
            # 'rgw_frontend_extra_args' list
            assert 'rgw.foo' in cephadm_module.spec_store.all_specs
            rgw_spec = cephadm_module.spec_store.all_specs['rgw.foo']
            assert dict(rgw_spec.to_json()) == {'service_type': 'rgw',
                                                'service_id': 'foo',
                                                'service_name': 'rgw.foo',
                                                'placement': {'hosts': ['host1']},
                                                'spec': {
                                                    'rgw_frontend_extra_args': ['tcp_nodelay=1',
                                                                                'request_timeout_ms=65000',
                                                                                'rgw_thread_pool_size=512'],
                                                    'rgw_frontend_port': '5000',
                                                    'rgw_frontend_type': 'beast',
                                                }}
        else:
            # in a real environment, we would still expect the spec to be there,
            # just untouched by the migration. For this test specifically,
            # though, the spec will only have ended up in the spec store
            # if it was migrated, so we can use this to verify the spec
            # was untouched.
            assert 'rgw.foo' not in cephadm_module.spec_store.all_specs