]>
Commit | Line | Data |
---|---|---|
f6b5b4d7 TL |
1 | import json |
2 | from datetime import datetime | |
3 | ||
4 | from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec | |
5 | from cephadm import CephadmOrchestrator | |
6 | from cephadm.inventory import SPEC_STORE_PREFIX, DATEFMT | |
7 | from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host | |
8 | from tests import mock | |
9 | ||
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _, __, ___: None)
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    """Migrate an old-scheduler rgw placement (host_pattern + count) to an
    explicit host list, and verify the migration only completes once the
    daemon inventory on every host is known to be fresh."""
    with with_host(cephadm_module, 'host1'), with_host(cephadm_module, 'host2'):

        # Emulate the old scheduler: a pattern-based placement spread
        # across both hosts.
        completion = cephadm_module.apply_rgw(
            ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
        )
        assert wait(cephadm_module, completion) == 'Scheduled rgw.r.z update...'

        cephadm_module._apply_all_services()
        daemon_hosts = {
            d.hostname
            for d in wait(cephadm_module, cephadm_module.list_daemons())
        }
        assert daemon_hosts == {'host1', 'host2'}

        # Re-apply with a narrower pattern, as the old scheduler would have
        # stored it.
        completion = cephadm_module.apply_rgw(
            ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
        )
        assert wait(cephadm_module, completion) == 'Scheduled rgw.r.z update...'

        cephadm_module.migration_current = 0
        cephadm_module.migration.migrate()

        # Migration must not advance while daemon info may be stale.
        assert cephadm_module.migration_current == 0

        # Hack: mark both hosts' daemon inventories as freshly updated so
        # Migration believes all daemons are already refreshed.
        now = datetime.now()
        cephadm_module.cache.last_daemon_update['host1'] = now
        cephadm_module.cache.last_daemon_update['host2'] = now

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        placements = [
            s.spec.placement
            for s in wait(cephadm_module, cephadm_module.describe_service())
        ]
        assert placements == [
            PlacementSpec(
                count=2,
                hosts=[
                    HostPlacementSpec(hostname='host1', network='', name=''),
                    HostPlacementSpec(hostname='host2', network='', name=''),
                ],
            )
        ]
47 | ||
48 | ||
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    """A stored mon spec with a bogus service_id ('mon.wrong') must be
    migrated to the plain unmanaged 'mon' spec."""
    payload = {
        'spec': {
            'service_type': 'mon',
            'service_id': 'wrong',
            'placement': {
                'hosts': ['host1'],
            },
        },
        'created': datetime.utcnow().strftime(DATEFMT),
    }
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon.wrong',
            json.dumps(payload, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # Loaded under the bogus key, but still resolving to service 'mon'.
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        # After migration: exactly one spec, keyed 'mon', marked unmanaged.
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1']),
        )
80 | ||
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    """When both a correct 'mon' spec and a bogus 'mon.wrong' spec are
    stored, migration must collapse them into the single unmanaged 'mon'
    spec, keeping the placement of the correct one."""
    stored = [
        ('mon', {
            'service_type': 'mon',
            'placement': {
                'count': 5,
            },
        }),
        ('mon.wrong', {
            'service_type': 'mon',
            'service_id': 'wrong',
            'placement': {
                'hosts': ['host1'],
            },
        }),
    ]
    with with_host(cephadm_module, 'host1'):
        for key, spec in stored:
            cephadm_module.set_store(
                SPEC_STORE_PREFIX + key,
                json.dumps({
                    'spec': spec,
                    'created': datetime.utcnow().strftime(DATEFMT),
                }, sort_keys=True),
            )

        cephadm_module.spec_store.load()

        # Both entries load, and both resolve to service 'mon'.
        assert len(cephadm_module.spec_store.specs) == 2
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
        assert cephadm_module.spec_store.specs['mon'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        # The duplicate is gone; the surviving spec keeps count=5.
        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5),
        )
124 | ||
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    """An 'mds' spec stored without a service_id is invalid and must be
    dropped on load, leaving nothing for the migration to touch."""
    payload = {
        'spec': {
            'service_type': 'mds',
            'placement': {
                'hosts': ['host1'],
            },
        },
        'created': datetime.utcnow().strftime(DATEFMT),
    }
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps(payload, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # Nothing to migrate: the invalid spec was discarded on load.
        assert len(cephadm_module.spec_store.specs) == 0
144 |