import json

from ceph.deployment.service_spec import HostPlacementSpec, PlacementSpec, ServiceSpec
from ceph.utils import datetime_now, datetime_to_str

from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
from cephadm.migrations import LAST_MIGRATION
from cephadm.serve import CephadmServe
from cephadm.tests.fixtures import _run_cephadm, receive_agent_metadata_all_hosts, wait, with_host

from tests import mock
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    """Migrating from the old (implicit) scheduler pins an rgw service's
    wildcard placement down to the concrete set of hosts it runs on.
    """
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            assert cephadm_module.migration_current == 0

            # Daemon metadata must be fresh on every host before the
            # migration is allowed to make progress.
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            receive_agent_metadata_all_hosts(cephadm_module)
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()

            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry, for this hack, but I need to make sure, Migration thinks,
            # we have updated all daemons already.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current >= 2

            # The wildcard placement has been rewritten to an explicit host list.
            out = [o.spec.placement for o in wait(
                cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(
                hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    """A stored mon spec with a bogus service_id ('mon.wrong') is collapsed
    into the plain 'mon' service by the service-id migration.
    """
    with with_host(cephadm_module, 'host1'):
        # NOTE(review): the interior of this JSON body was missing from the
        # mangled source (only 'service_type', 'service_id' and 'created'
        # survived); reconstructed from the assertions below — confirm
        # against the original file.
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current >= 2

        # The bad key is gone; the spec now lives under the canonical name.
        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,  # NOTE(review): reconstructed — verify upstream
            placement=PlacementSpec(hosts=['host1'])
        )
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    """When both a correct 'mon' spec and a bogus 'mon.wrong' spec are
    stored, the migration keeps the correct one and drops the other.
    """
    with with_host(cephadm_module, 'host1'):
        # NOTE(review): both JSON bodies below were partially missing from
        # the mangled source; reconstructed from the surviving keys and the
        # final PlacementSpec(count=5) assertion — confirm against the
        # original file.
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({
            'spec': {
                'service_type': 'mon',
                'placement': {
                    'count': 5,
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 2
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.all_specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current >= 2

        # Only the canonical 'mon' spec survives the migration.
        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,  # NOTE(review): reconstructed — verify upstream
            placement=PlacementSpec(count=5)
        )
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    """An mds spec stored without a service_id is invalid and is dropped
    when the spec store is loaded.
    """
    with with_host(cephadm_module, 'host1'):
        # NOTE(review): the interior of this JSON body was missing from the
        # mangled source; reconstructed (mds spec deliberately lacking a
        # 'service_id') — confirm against the original file.
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({
            'spec': {
                'service_type': 'mds',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime_to_str(datetime_now()),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.all_specs) == 0
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
    """An old-style nfs spec (with pool/namespace) is queued for export
    migration, and running the migration advances to LAST_MIGRATION.
    """
    with with_host(cephadm_module, 'host1'):
        # NOTE(review): parts of this JSON body were missing from the
        # mangled source; reconstructed from the surviving
        # 'namespace': 'foons' line and the queue assertion
        # ['foo', 'mypool', 'foons'] below — confirm against the original.
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        # Loading the old spec enqueues (service_id, pool, namespace).
        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
    """Same as test_migrate_nfs_initial, but with an Octopus-era
    'ganesha-' prefixed service_id.
    """
    with with_host(cephadm_module, 'host1'):
        # NOTE(review): parts of this JSON body were missing from the
        # mangled source; reconstructed from the surviving
        # 'service_id': 'ganesha-foo' / 'namespace': 'foons' lines and the
        # queue assertion below — confirm against the original file.
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'ganesha-foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        # Loading the old spec enqueues (service_id, pool, namespace).
        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['ganesha-foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
    """The migration from step 3 introduces a managed client.admin keyring
    placed on all hosts carrying the '_admin' label.
    """
    assert 'client.admin' not in cephadm_module.keys.keys

    cephadm_module.migration_current = 3
    cephadm_module.migration.migrate()
    assert cephadm_module.migration_current == LAST_MIGRATION

    # The migration registered the admin keyring, targeted by label.
    assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'