# ceph/src/pybind/mgr/cephadm/tests/test_migration.py (ceph 15.2.8)
import json
from datetime import datetime

import pytest

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
from cephadm import CephadmOrchestrator
from cephadm.inventory import SPEC_STORE_PREFIX
from cephadm.utils import DATEFMT
from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host
from orchestrator import OrchestratorError
from cephadm.serve import CephadmServe
from tests import mock


@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
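    # Rough outline (inferred from the assertions below): start from the legacy
    # scheduler state, force the migration back to step 0, and check that the
    # wildcard rgw placement is only converted once host and daemon information
    # has been refreshed.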
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # the migration cannot advance yet: daemon info for all hosts is still missing.
            assert cephadm_module.migration_current == 0

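            # After a host/daemon refresh the migration has the daemon
            # information it needs and can move forward; apply all services to
            # let the scheduler place the rgw daemons.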
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()

            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry for this hack, but I need to make sure Migration thinks
            # we have already updated all daemons.
            cephadm_module.cache.last_daemon_update['host1'] = datetime.now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime.now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current == 2

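            # The old host_pattern='*' placement should now have been converted
            # into an explicit host list by the migration.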
            out = [o.spec.placement for o in wait(
                cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(
                hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]


@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
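    # Rough outline (inferred from the assertions): a mon spec that was stored
    # with a spurious service_id ('mon.wrong') should be collapsed into a plain
    # 'mon' spec by the service-id migration.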
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime.utcnow().strftime(DATEFMT),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'

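        # Migration step 1 -> 2 is (presumably) the service-id cleanup; start
        # from 1 so that only this step runs.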
        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1'])
        )


@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
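    # Rough outline (inferred from the assertions): when both a plain 'mon'
    # spec and a bogus 'mon.wrong' spec exist, the migration should keep only
    # the single 'mon' spec (the one without a service_id wins).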
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({
            'spec': {
                'service_type': 'mon',
                'placement': {
                    'count': 5,
                }
            },
            'created': datetime.utcnow().strftime(DATEFMT),
        }, sort_keys=True),
        )
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
            'spec': {
                'service_type': 'mon',
                'service_id': 'wrong',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime.utcnow().strftime(DATEFMT),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.specs) == 2
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5)
        )


@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
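    # Rough outline (inferred from the assertions): an mds spec without a
    # service_id is invalid, so spec_store.load() drops it and there is nothing
    # left for the migration to touch.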
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({
            'spec': {
                'service_type': 'mds',
                'placement': {
                    'hosts': ['host1']
                }
            },
            'created': datetime.utcnow().strftime(DATEFMT),
        }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.specs) == 0