import json
from unittest import mock

import pytest

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from cephadm import CephadmOrchestrator
from cephadm.upgrade import CephadmUpgrade
from orchestrator import OrchestratorError, DaemonDescription
from .fixtures import _run_cephadm, wait, with_host, with_service, \
    receive_agent_metadata, async_side_effect

from typing import List, Tuple, Optional

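# Walk the upgrade lifecycle end to end: start, query status, pause, resume, stop.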
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_start(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)), status_running=True):
                assert wait(cephadm_module, cephadm_module.upgrade_start(
                    'image_id', None)) == 'Initiating upgrade to image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_status()).target_image == 'image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_pause()) == 'Paused upgrade to image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_resume()) == 'Resumed upgrade to image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_stop()) == 'Stopped upgrade to image_id'

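# Run a full mocked upgrade twice: once converging the global container_image
# setting on the image tag, once on the repo digest (use_repo_digest).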
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@pytest.mark.parametrize("use_repo_digest",
                         [
                             False,
                             True
                         ])
def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):
            cephadm_module.set_container_image('global', 'from_image')
            cephadm_module.use_repo_digest = use_repo_digest
            with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*', count=2)),
                              CephadmOrchestrator.apply_mgr, '', status_running=True),\
                    mock.patch("cephadm.module.CephadmOrchestrator.lookup_release_name",
                               return_value='foo'),\
                    mock.patch("cephadm.module.CephadmOrchestrator.version",
                               new_callable=mock.PropertyMock) as version_mock,\
                    mock.patch("cephadm.module.CephadmOrchestrator.get",
                               return_value={
                                   # capture fields in both mon and osd maps
                                   "require_osd_release": "pacific",
                                   "min_mon_release": 16,
                               }):
                version_mock.return_value = 'ceph version 18.2.1 (somehash)'
                assert wait(cephadm_module, cephadm_module.upgrade_start(
                    'to_image', None)) == 'Initiating upgrade to to_image'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_status()).target_image == 'to_image'

                def _versions_mock(cmd):
                    return json.dumps({
                        'mgr': {
                            'ceph version 1.2.3 (asdf) blah': 1
                        }
                    })

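                # From here on the mocked 'versions' mon command reports a
                # single mgr daemon still on the old version.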
                cephadm_module._mon_command_mock_versions = _versions_mock

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
                    'image_id': 'image_id',
                    'repo_digests': ['to_image@repo_digest'],
                    'ceph_version': 'ceph version 18.2.3 (hash)',
                }))):

                    cephadm_module.upgrade._do_upgrade()

                assert cephadm_module.upgrade.upgrade_state is not None

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
                    json.dumps([
                        dict(
                            name=list(cephadm_module.cache.daemons['host1'].keys())[0],
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            container_image_id='image_id',
                            container_image_digests=['to_image@repo_digest'],
                            deployed_by=['to_image@repo_digest'],
                            version='version',
                            state='running',
                        )
                    ])
                )):
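                    # Simulate agent metadata ('ls' output) arriving from both
                    # hosts, so the cached daemon inventory reflects the new
                    # image digest.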
                    receive_agent_metadata(cephadm_module, 'host1', ['ls'])
                    receive_agent_metadata(cephadm_module, 'host2', ['ls'])

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
                    'image_id': 'image_id',
                    'repo_digests': ['to_image@repo_digest'],
                    'ceph_version': 'ceph version 18.2.3 (hash)',
                }))):
                    cephadm_module.upgrade._do_upgrade()

                _, image, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': 'global',
                    'key': 'container_image',
                })
                if use_repo_digest:
                    assert image == 'to_image@repo_digest'
                else:
                    assert image == 'to_image'

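# Regression test: a stored upgrade_state of JSON 'null' should load as None
# rather than raising when CephadmUpgrade is constructed.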
def test_upgrade_state_null(cephadm_module: CephadmOrchestrator):
    # This test validates https://tracker.ceph.com/issues/47580
    cephadm_module.set_store('upgrade_state', 'null')
    CephadmUpgrade(cephadm_module)
    assert CephadmUpgrade(cephadm_module).upgrade_state is None

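# Upgrading needs a standby mgr to fail over to while the active mgr restarts,
# so starting an upgrade on a single-mgr cluster should be refused.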
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_not_enough_mgrs(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=1)), CephadmOrchestrator.apply_mgr, ''):
            with pytest.raises(OrchestratorError):
                wait(cephadm_module, cephadm_module.upgrade_start('image_id', None))

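# ok-to-stop for a mon requires quorum to survive stopping it, so a monmap
# with fewer than three mons can never pass the check.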
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.CephadmOrchestrator.check_mon_command")
def test_enough_mons_for_ok_to_stop(check_mon_command, cephadm_module: CephadmOrchestrator):
    # only 2 monitors, not enough for ok-to-stop to ever pass
    check_mon_command.return_value = (
        0, '{"monmap": {"mons": [{"name": "mon.1"}, {"name": "mon.2"}]}}', '')
    assert not cephadm_module.upgrade._enough_mons_for_ok_to_stop()

    # 3 monitors, ok-to-stop should work fine
    check_mon_command.return_value = (
        0, '{"monmap": {"mons": [{"name": "mon.1"}, {"name": "mon.2"}, {"name": "mon.3"}]}}', '')
    assert cephadm_module.upgrade._enough_mons_for_ok_to_stop()

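# An MDS counts as safe to stop only while its service runs more daemons than
# the filesystem's max_mds; the three cases below cover equal-to and
# greater-than daemon counts.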
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.HostCache.get_daemons_by_service")
@mock.patch("cephadm.CephadmOrchestrator.get")
def test_enough_mds_for_ok_to_stop(get, get_daemons_by_service, cephadm_module: CephadmOrchestrator):
    get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'test', 'max_mds': 1}}]}]
    get_daemons_by_service.side_effect = [[DaemonDescription()]]
    assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds', daemon_id='test.host1.gfknd', service_name='mds.test'))

    get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'myfs.test', 'max_mds': 2}}]}]
    get_daemons_by_service.side_effect = [[DaemonDescription(), DaemonDescription()]]
    assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds', daemon_id='myfs.test.host1.gfknd', service_name='mds.myfs.test'))

    get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'myfs.test', 'max_mds': 1}}]}]
    get_daemons_by_service.side_effect = [[DaemonDescription(), DaemonDescription()]]
    assert cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds', daemon_id='myfs.test.host1.gfknd', service_name='mds.myfs.test'))

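# upgrade_ls turns registry tags into upgrade candidates: with use_tags the
# raw tags are returned sorted; otherwise the leading 'v' is stripped and,
# unless show_all_versions is set, versions older than the current release
# are filtered out.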
@pytest.mark.parametrize("current_version, use_tags, show_all_versions, tags, result",
                         [
                             # several candidate versions (from different major versions)
                             (
                                 (16, 1, '16.1.0'),
                                 False,  # use_tags
                                 False,  # show_all_versions
                                 [
                                     'v17.1.0',
                                     'v16.2.7',
                                     'v16.2.6',
                                     'v16.2.5',
                                     'v16.1.4',
                                     'v16.1.3',
                                     'v15.2.0',
                                 ],
                                 ['17.1.0', '16.2.7', '16.2.6', '16.2.5', '16.1.4', '16.1.3']
                             ),
                             # candidate minor versions are available
                             (
                                 (16, 1, '16.1.0'),
                                 False,  # use_tags
                                 False,  # show_all_versions
                                 [
                                     'v16.2.2',
                                     'v16.2.1',
                                     'v16.1.6',
                                 ],
                                 ['16.2.2', '16.2.1', '16.1.6']
                             ),
                             # all versions are less than the current version
                             (
                                 (17, 2, '17.2.0'),
                                 False,  # use_tags
                                 False,  # show_all_versions
                                 [
                                     'v17.1.0',
                                     'v16.2.7',
                                     'v16.2.6',
                                 ],
                                 []
                             ),
                             # show all versions (regardless of the current version)
                             (
                                 (16, 1, '16.1.0'),
                                 False,  # use_tags
                                 True,  # show_all_versions
                                 [
                                     'v17.1.0',
                                     'v16.2.7',
                                     'v16.2.6',
                                     'v15.1.0',
                                     'v14.2.0',
                                 ],
                                 ['17.1.0', '16.2.7', '16.2.6', '15.1.0', '14.2.0']
                             ),
                             # show all tags (regardless of the current version and show_all_versions flag)
                             (
                                 (16, 1, '16.1.0'),
                                 True,  # use_tags
                                 False,  # show_all_versions
                                 [
                                     'v17.1.0',
                                     'v16.2.7',
                                     'v16.2.6',
                                     'v16.2.5',
                                     'v16.1.4',
                                     'v16.1.3',
                                     'v15.2.0',
                                 ],
                                 ['v15.2.0', 'v16.1.3', 'v16.1.4', 'v16.2.5',
                                  'v16.2.6', 'v16.2.7', 'v17.1.0']
                             ),
                         ])
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_ls(current_version, use_tags, show_all_versions, tags, result, cephadm_module: CephadmOrchestrator):
    with mock.patch('cephadm.upgrade.Registry.get_tags', return_value=tags):
        with mock.patch('cephadm.upgrade.CephadmUpgrade._get_current_version', return_value=current_version):
            out = cephadm_module.upgrade.upgrade_ls(None, use_tags, show_all_versions)
            if use_tags:
                assert out['tags'] == result
            else:
                assert out['versions'] == result

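# Staggered upgrade lets users limit an upgrade to given daemon types, hosts,
# or services. Validation must reject any filter set that would upgrade other
# daemons while some mgr is still on the old image, since mgrs upgrade first.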
@pytest.mark.parametrize(
    "upgraded, not_upgraded, daemon_types, hosts, services, should_block",
    # [ ([(type, host, id), ... ], [...], [daemon types], [hosts], [services], True/False), ... ]
    [
        (  # valid, upgrade mgr daemons
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            ['mgr'],
            None,
            None,
            False
        ),
        (  # invalid, can't upgrade mons until mgr is upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            ['mon'],
            None,
            None,
            True
        ),
        (  # invalid, can't upgrade mon service until all mgr daemons are upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            None,
            None,
            ['mon'],
            True
        ),
        (  # valid, upgrade mgr service
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            None,
            None,
            ['mgr'],
            False
        ),
        (  # valid, mgr is already upgraded so can upgrade mons
            [('mgr', 'a', 'a.x')],
            [('mon', 'a', 'a')],
            ['mon'],
            None,
            None,
            False
        ),
        (  # invalid, can't upgrade the mon on a while the mgr on b is un-upgraded
            [],
            [('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['a'],
            None,
            True
        ),
        (  # valid, the only daemon on b is a mgr
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['b'],
            None,
            False
        ),
        (  # invalid, can't upgrade mon on a while mgr on b is un-upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['a'],
            None,
            True
        ),
        (  # valid, only upgrading the mgr on a
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            ['mgr'],
            ['a'],
            None,
            False
        ),
        (  # valid, mgr daemons not on b are already upgraded
            [('mgr', 'a', 'a.x')],
            [('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['b'],
            None,
            False
        ),
        (  # valid, all the necessary hosts are covered, mgr on c is already upgraded
            [('mgr', 'c', 'c.z')],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a'), ('osd', 'c', '0')],
            None,
            ['a', 'b'],
            None,
            False
        ),
        (  # invalid, can't upgrade mon on a while mgr on b is un-upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            ['mgr', 'mon'],
            ['a'],
            None,
            True
        ),
        (  # valid, the only mon not on "b" is already upgraded. Case hit while writing the teuthology test
            [('mon', 'a', 'a')],
            [('mon', 'b', 'x'), ('mon', 'b', 'y'), ('osd', 'a', '1'), ('osd', 'b', '2')],
            ['mon', 'osd'],
            ['b'],
            None,
            False
        ),
    ]
)
@mock.patch("cephadm.module.HostCache.get_daemons")
@mock.patch("cephadm.serve.CephadmServe._get_container_image_info")
@mock.patch('cephadm.module.SpecStore.__getitem__')
def test_staggered_upgrade_validation(
    get_spec,
    get_image_info,
    get_daemons,
    upgraded: List[Tuple[str, str, str]],
    not_upgraded: List[Tuple[str, str, str]],
    daemon_types: Optional[List[str]],
    hosts: Optional[List[str]],
    services: Optional[List[str]],
    should_block: bool,
    cephadm_module: CephadmOrchestrator,
):
    def to_dds(ts: List[Tuple[str, str, str]], upgraded: bool) -> List[DaemonDescription]:
        dds = []
        digest = 'new_image@repo_digest' if upgraded else 'old_image@repo_digest'
        for t in ts:
            dds.append(DaemonDescription(daemon_type=t[0],
                                         hostname=t[1],
                                         daemon_id=t[2],
                                         container_image_digests=[digest],
                                         deployed_by=[digest]))
        return dds
    get_daemons.return_value = to_dds(upgraded, True) + to_dds(not_upgraded, False)
    get_image_info.side_effect = async_side_effect(
        ('new_id', 'ceph version 99.99.99 (hash)', ['new_image@repo_digest']))

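    # SpecStore.__getitem__ is expected to yield an object exposing a .spec
    # attribute; a minimal stand-in keeps service-name lookups working.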
    class FakeSpecDesc:
        def __init__(self, spec):
            self.spec = spec

    def _get_spec(s):
        return FakeSpecDesc(ServiceSpec(s))

    get_spec.side_effect = _get_spec
    if should_block:
        with pytest.raises(OrchestratorError):
            cephadm_module.upgrade._validate_upgrade_filters(
                'new_image_name', daemon_types, hosts, services)
    else:
        cephadm_module.upgrade._validate_upgrade_filters(
            'new_image_name', daemon_types, hosts, services)