# Source: ceph.git (git.proxmox.com mirror), tag v15.2.9
# File: ceph/src/pybind/mgr/cephadm/tests/test_migration.py
1 import json
2
3 import pytest
4
5 from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
6 from ceph.utils import datetime_to_str, datetime_now
7 from cephadm import CephadmOrchestrator
8 from cephadm.inventory import SPEC_STORE_PREFIX
9 from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host
10 from orchestrator import OrchestratorError
11 from cephadm.serve import CephadmServe
12 from tests import mock
13
14
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    """Migrate an rgw spec created by the legacy scheduler.

    Verifies that:
      * the migration does not advance (``migration_current`` stays 0) until
        host/daemon information has been refreshed,
      * after the refresh + migration, daemons are scheduled on both hosts,
      * re-applying the spec pins the placement to explicit
        ``HostPlacementSpec`` entries for host1 and host2.
    """
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler: host_pattern='*' with a count
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # NOTE(review): an earlier revision expected this call to raise
            # OrchestratorError ("cephadm migration still ongoing"); it is now
            # expected to be a no-op while the migration is pending.
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # migration must not advance before daemon info is refreshed
            assert cephadm_module.migration_current == 0

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()

            # daemons must have been deployed on both hosts
            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry, for this hack, but I need to make sure, Migration thinks,
            # we have updated all daemons already.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current == 2

            # placement is now pinned to the concrete hosts
            out = [o.spec.placement for o in wait(
                cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(
                hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]
61
62
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    """A stored mon spec carrying a bogus service_id is collapsed to plain 'mon'."""
    with with_host(cephadm_module, 'host1'):
        bad_mon_payload = {
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon.wrong',
            json.dumps(bad_mon_payload, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # before migration: one spec, stored under the wrong key
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        # after migration: the spec lives under 'mon', unmanaged, same placement
        expected = ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1'])
        )
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == expected
93
94
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    """With both a correct 'mon' spec and a bogus 'mon.wrong' one, migration keeps the correct spec."""
    with with_host(cephadm_module, 'host1'):
        stored_specs = {
            'mon': {
                'service_type': 'mon',
                'placement': {
                    'count': 5,
                }
            },
            'mon.wrong': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
        }
        for store_key, spec_dict in stored_specs.items():
            cephadm_module.set_store(
                SPEC_STORE_PREFIX + store_key,
                json.dumps({
                    'spec': spec_dict,
                    'created': datetime_to_str(datetime_now()),
                }, sort_keys=True),
            )

        cephadm_module.spec_store.load()

        # before migration: both entries are present and resolve to 'mon'
        assert len(cephadm_module.spec_store.specs) == 2
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        # after migration: only the correct spec survives (count=5, unmanaged)
        expected = ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5)
        )
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == expected
136
137
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    # An mds spec stored without a service_id is dropped on load:
    # mds requires a service_id, so the invalid entry cannot be parsed
    # and never makes it into the spec store.
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({
            'spec': {
                'service_type': 'mds',
                # NOTE: deliberately no 'service_id' here
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.specs) == 0