# ceph/src/pybind/mgr/cephadm/tests/test_upgrade.py
import json
from unittest import mock

import pytest

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from cephadm import CephadmOrchestrator
from cephadm.upgrade import CephadmUpgrade, UpgradeState
from cephadm.ssh import HostConnectionError
from orchestrator import OrchestratorError, DaemonDescription
from .fixtures import _run_cephadm, wait, with_host, with_service, \
    receive_agent_metadata, async_side_effect

from typing import List, Tuple, Optional
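

# These tests drive CephadmUpgrade through the CephadmOrchestrator test
# harness; CephadmServe._run_cephadm is mocked throughout, so no real hosts
# or containers are touched.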
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_start(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2)), status_running=True):
                assert wait(cephadm_module, cephadm_module.upgrade_start(
                    'image_id', None)) == 'Initiating upgrade to image_id'

                assert wait(cephadm_module, cephadm_module.upgrade_status()
                            ).target_image == 'image_id'

                assert wait(cephadm_module, cephadm_module.upgrade_pause()
                            ) == 'Paused upgrade to image_id'

                assert wait(cephadm_module, cephadm_module.upgrade_resume()
                            ) == 'Resumed upgrade to image_id'

                assert wait(cephadm_module, cephadm_module.upgrade_stop()
                            ) == 'Stopped upgrade to image_id'
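

# The next three tests cover offline-host handling: upgrade_start refuses to
# begin while a host is offline, and both _upgrade_daemons and _do_upgrade
# surface HostConnectionError for hosts that drop out mid-upgrade.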
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_start_offline_hosts(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            cephadm_module.offline_hosts = set(['test2'])
            with pytest.raises(OrchestratorError, match=r"Upgrade aborted - Some host\(s\) are currently offline: {'test2'}"):
                cephadm_module.upgrade_start('image_id', None)
            cephadm_module.offline_hosts = set([])  # so remove_host doesn't fail when leaving the with_host block


@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_daemons_offline_hosts(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            cephadm_module.upgrade.upgrade_state = UpgradeState('target_image', 0)
            with mock.patch("cephadm.serve.CephadmServe._run_cephadm", side_effect=HostConnectionError('connection failure reason', 'test2', '192.168.122.1')):
                _to_upgrade = [(DaemonDescription(daemon_type='crash', daemon_id='test2', hostname='test2'), True)]
                with pytest.raises(HostConnectionError, match=r"connection failure reason"):
                    cephadm_module.upgrade._upgrade_daemons(_to_upgrade, 'target_image', ['digest1'])


@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_do_upgrade_offline_hosts(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            cephadm_module.upgrade.upgrade_state = UpgradeState('target_image', 0)
            cephadm_module.offline_hosts = set(['test2'])
            with pytest.raises(HostConnectionError, match=r"Host\(s\) were marked offline: {'test2'}"):
                cephadm_module.upgrade._do_upgrade()
            cephadm_module.offline_hosts = set([])  # so remove_host doesn't fail when leaving the with_host block
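

# Resuming a paused upgrade should also clear any upgrade-related health
# warnings raised earlier; the test below checks that remove_health_warning
# is called for every alert id in CephadmUpgrade.UPGRADE_ERRORS.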
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.CephadmOrchestrator.remove_health_warning")
def test_upgrade_resume_clear_health_warnings(_rm_health_warning, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            cephadm_module.upgrade.upgrade_state = UpgradeState('target_image', 0, paused=True)
            _rm_health_warning.return_value = None
            assert wait(cephadm_module, cephadm_module.upgrade_resume()
                        ) == 'Resumed upgrade to target_image'
            calls_list = [mock.call(alert_id) for alert_id in cephadm_module.upgrade.UPGRADE_ERRORS]
            _rm_health_warning.assert_has_calls(calls_list, any_order=True)
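

# End-to-end upgrade walk-through: use_repo_digest controls whether the
# resolved digest ('to_image@repo_digest') or the plain image name is what
# ends up persisted in the 'container_image' config option at the end.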
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@pytest.mark.parametrize("use_repo_digest",
                         [False, True])  # values elided in the source view; both modes are exercised
def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):
            cephadm_module.set_container_image('global', 'from_image')
            cephadm_module.use_repo_digest = use_repo_digest
            # NOTE: the lookup_release_name return value was elided in the
            # source view; 'foo' below is a stand-in release name.
            with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*', count=2)),
                              CephadmOrchestrator.apply_mgr, '', status_running=True),\
                mock.patch("cephadm.module.CephadmOrchestrator.lookup_release_name",
                           return_value='foo'),\
                mock.patch("cephadm.module.CephadmOrchestrator.version",
                           new_callable=mock.PropertyMock) as version_mock,\
                mock.patch("cephadm.module.CephadmOrchestrator.get",
                           return_value={
                               # capture fields in both mon and osd maps
                               "require_osd_release": "pacific",
                               "min_mon_release": 16,
                           }):

                version_mock.return_value = 'ceph version 18.2.1 (somehash)'
                assert wait(cephadm_module, cephadm_module.upgrade_start(
                    'to_image', None)) == 'Initiating upgrade to to_image'

                assert wait(cephadm_module, cephadm_module.upgrade_status()
                            ).target_image == 'to_image'

                # NOTE: the daemon-type key in this mocked 'versions' output
                # was elided in the source view; 'mgr' is assumed here.
                def _versions_mock(cmd):
                    return json.dumps({
                        'mgr': {
                            'ceph version 1.2.3 (asdf) blah': 1
                        }
                    })

                cephadm_module._mon_command_mock_versions = _versions_mock

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
                    'image_id': 'image_id',
                    'repo_digests': ['to_image@repo_digest'],
                    'ceph_version': 'ceph version 18.2.3 (hash)',
                }))):

                    cephadm_module.upgrade._do_upgrade()

                assert cephadm_module.upgrade_status is not None

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
                    json.dumps([dict(
                        name=list(cephadm_module.cache.daemons['host1'].keys())[0],
                        # style/fsid/version/state were elided in the source
                        # view; filled in with typical 'cephadm ls' values
                        style='cephadm',
                        fsid='fsid',
                        container_id='container_id',
                        container_image_name='to_image',
                        container_image_id='image_id',
                        container_image_digests=['to_image@repo_digest'],
                        deployed_by=['to_image@repo_digest'],
                        version='version',
                        state='running',
                    )])
                )):
                    receive_agent_metadata(cephadm_module, 'host1', ['ls'])
                    receive_agent_metadata(cephadm_module, 'host2', ['ls'])

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
                    'image_id': 'image_id',
                    'repo_digests': ['to_image@repo_digest'],
                    'ceph_version': 'ceph version 18.2.3 (hash)',
                }))):
                    cephadm_module.upgrade._do_upgrade()

                _, image, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': 'global',  # 'who' elided in the source view; matches set_container_image above
                    'key': 'container_image',
                })
                if use_repo_digest:
                    assert image == 'to_image@repo_digest'
                else:
                    assert image == 'to_image'


def test_upgrade_state_null(cephadm_module: CephadmOrchestrator):
    # This test validates https://tracker.ceph.com/issues/47580
    cephadm_module.set_store('upgrade_state', 'null')
    CephadmUpgrade(cephadm_module)
    assert CephadmUpgrade(cephadm_module).upgrade_state is None


@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_not_enough_mgrs(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=1)), CephadmOrchestrator.apply_mgr, ''):
            with pytest.raises(OrchestratorError):
                wait(cephadm_module, cephadm_module.upgrade_start('image_id', None))
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.CephadmOrchestrator.check_mon_command")
def test_enough_mons_for_ok_to_stop(check_mon_command, cephadm_module: CephadmOrchestrator):
    # only 2 monitors, not enough for ok-to-stop to ever pass
    check_mon_command.return_value = (
        0, '{"monmap": {"mons": [{"name": "mon.1"}, {"name": "mon.2"}]}}', '')
    assert not cephadm_module.upgrade._enough_mons_for_ok_to_stop()

    # 3 monitors, ok-to-stop should work fine
    check_mon_command.return_value = (
        0, '{"monmap": {"mons": [{"name": "mon.1"}, {"name": "mon.2"}, {"name": "mon.3"}]}}', '')
    assert cephadm_module.upgrade._enough_mons_for_ok_to_stop()


@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.HostCache.get_daemons_by_service")
@mock.patch("cephadm.CephadmOrchestrator.get")
def test_enough_mds_for_ok_to_stop(get, get_daemons_by_service, cephadm_module: CephadmOrchestrator):
    get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'test', 'max_mds': 1}}]}]
    get_daemons_by_service.side_effect = [[DaemonDescription()]]
    assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds', daemon_id='test.host1.gfknd', service_name='mds.test'))

    get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'myfs.test', 'max_mds': 2}}]}]
    get_daemons_by_service.side_effect = [[DaemonDescription(), DaemonDescription()]]
    assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds', daemon_id='myfs.test.host1.gfknd', service_name='mds.myfs.test'))

    get.side_effect = [{'filesystems': [{'mdsmap': {'fs_name': 'myfs.test', 'max_mds': 1}}]}]
    get_daemons_by_service.side_effect = [[DaemonDescription(), DaemonDescription()]]
    assert cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds', daemon_id='myfs.test.host1.gfknd', service_name='mds.myfs.test'))
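

# upgrade_ls lists candidate versions (or raw tags) fetched from the registry,
# filtered against the currently running version unless show_all_versions or
# use_tags is set.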
# NOTE: the current_version tuples and the registry tag lists below were
# elided in the source view; the values shown are reconstructions chosen to
# be consistent with each case's comment and expected result.
@pytest.mark.parametrize("current_version, use_tags, show_all_versions, tags, result",
                         [
                             # several candidate versions (from different major versions)
                             (
                                 (16, 1, '16.1.2'),
                                 False,  # use_tags
                                 False,  # show_all_versions
                                 ['v17.1.0', 'v16.2.7', 'v16.2.6', 'v16.2.5',
                                  'v16.1.4', 'v16.1.3', 'v15.2.0'],
                                 ['17.1.0', '16.2.7', '16.2.6', '16.2.5', '16.1.4', '16.1.3']
                             ),
                             # candidate minor versions are available
                             (
                                 (16, 1, '16.1.5'),
                                 False,  # use_tags
                                 False,  # show_all_versions
                                 ['v16.2.2', 'v16.2.1', 'v16.1.6', 'v16.1.5'],
                                 ['16.2.2', '16.2.1', '16.1.6']
                             ),
                             # all versions are less than the current version
                             (
                                 (17, 2, '17.2.0'),
                                 False,  # use_tags
                                 False,  # show_all_versions
                                 ['v17.1.0', 'v16.2.7', 'v16.2.6'],
                                 []
                             ),
                             # show all versions (regardless of the current version)
                             (
                                 (16, 1, '16.1.2'),
                                 False,  # use_tags
                                 True,  # show_all_versions
                                 ['v17.1.0', 'v16.2.7', 'v16.2.6', 'v15.1.0', 'v14.2.0'],
                                 ['17.1.0', '16.2.7', '16.2.6', '15.1.0', '14.2.0']
                             ),
                             # show all tags (regardless of the current version and show_all_versions flag)
                             (
                                 (16, 1, '16.1.2'),
                                 True,  # use_tags
                                 False,  # show_all_versions
                                 ['v15.2.0', 'v16.1.3', 'v16.1.4', 'v16.2.5',
                                  'v16.2.6', 'v16.2.7', 'v17.1.0'],
                                 ['v15.2.0', 'v16.1.3', 'v16.1.4', 'v16.2.5',
                                  'v16.2.6', 'v16.2.7', 'v17.1.0']
                             ),
                         ])
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_ls(current_version, use_tags, show_all_versions, tags, result, cephadm_module: CephadmOrchestrator):
    with mock.patch('cephadm.upgrade.Registry.get_tags', return_value=tags):
        with mock.patch('cephadm.upgrade.CephadmUpgrade._get_current_version', return_value=current_version):
            out = cephadm_module.upgrade.upgrade_ls(None, use_tags, show_all_versions)
            if use_tags:
                assert out['tags'] == result
            else:
                assert out['versions'] == result
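

# Staggered upgrade: _validate_upgrade_filters must reject daemon_types/hosts/
# services filters that would upgrade non-mgr daemons while some mgr daemon
# outside the filter is still running the old image.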
# NOTE: in each case below, the empty 'upgraded' lists and the
# daemon_types/hosts/services/should_block filter values were elided in the
# source view; they are reconstructed here to match each case's comment.
@pytest.mark.parametrize(
    "upgraded, not_upgraded, daemon_types, hosts, services, should_block",
    # [ ([(type, host, id), ... ], [...], [daemon types], [hosts], [services], True/False), ... ]
    [
        (  # valid, upgrade mgr daemons
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            ['mgr'],
            None,
            None,
            False
        ),
        (  # invalid, can't upgrade mons until mgr is upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            ['mon'],
            None,
            None,
            True
        ),
        (  # invalid, can't upgrade mon service until all mgr daemons are upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            None,
            None,
            ['mon'],
            True
        ),
        (  # valid, upgrade mgr service
            [],
            [('mgr', 'a', 'a.x'), ('mon', 'a', 'a')],
            None,
            None,
            ['mgr'],
            False
        ),
        (  # valid, mgr is already upgraded so can upgrade mons
            [('mgr', 'a', 'a.x')],
            [('mon', 'a', 'a')],
            ['mon'],
            None,
            None,
            False
        ),
        (  # invalid, can't upgrade all daemons on b b/c un-upgraded mgr on a
            [],
            [('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['a'],
            None,
            True
        ),
        (  # valid, only daemon on b is a mgr
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['b'],
            None,
            False
        ),
        (  # invalid, can't upgrade mon on a while mgr on b is un-upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['a'],
            None,
            True
        ),
        (  # valid, only upgrading the mgr on a
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            ['mgr'],
            ['a'],
            None,
            False
        ),
        (  # valid, mgr daemon not on b are upgraded
            [('mgr', 'a', 'a.x')],
            [('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            None,
            ['b'],
            None,
            False
        ),
        (  # valid, all the necessary hosts are covered, mgr on c is already upgraded
            [('mgr', 'c', 'c.z')],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a'), ('osd', 'c', '0')],
            None,
            ['a', 'b'],
            None,
            False
        ),
        (  # invalid, can't upgrade mon on a while mgr on b is un-upgraded
            [],
            [('mgr', 'a', 'a.x'), ('mgr', 'b', 'b.y'), ('mon', 'a', 'a')],
            ['mgr', 'mon'],
            ['a'],
            None,
            True
        ),
        (  # valid, only mon not on "b" is upgraded already. Case hit while making teuthology test
            [('mon', 'a', 'a')],
            [('mon', 'b', 'x'), ('mon', 'b', 'y'), ('osd', 'a', '1'), ('osd', 'b', '2')],
            ['mon'],
            ['b'],
            None,
            False
        ),
    ]
)
@mock.patch("cephadm.module.HostCache.get_daemons")
@mock.patch("cephadm.serve.CephadmServe._get_container_image_info")
@mock.patch('cephadm.module.SpecStore.__getitem__')
def test_staggered_upgrade_validation(
        get_spec,
        get_image_info,
        get_daemons,
        upgraded: List[Tuple[str, str, str]],
        not_upgraded: List[Tuple[str, str, str, str]],
        daemon_types: Optional[str],
        hosts: Optional[str],
        services: Optional[str],
        should_block: bool,
        cephadm_module: CephadmOrchestrator,
):
    def to_dds(ts: List[Tuple[str, str]], upgraded: bool) -> List[DaemonDescription]:
        dds = []
        digest = 'new_image@repo_digest' if upgraded else 'old_image@repo_digest'
        for t in ts:
            dds.append(DaemonDescription(daemon_type=t[0],
                                         hostname=t[1],
                                         daemon_id=t[2],
                                         container_image_digests=[digest],
                                         deployed_by=[digest],))
        return dds

    get_daemons.return_value = to_dds(upgraded, True) + to_dds(not_upgraded, False)
    get_image_info.side_effect = async_side_effect(
        ('new_id', 'ceph version 99.99.99 (hash)', ['new_image@repo_digest']))

    class FakeSpecDesc():
        def __init__(self, spec):
            self.spec = spec

    def _get_spec(s):
        return FakeSpecDesc(ServiceSpec(s))

    get_spec.side_effect = _get_spec

    if should_block:
        with pytest.raises(OrchestratorError):
            cephadm_module.upgrade._validate_upgrade_filters(
                'new_image_name', daemon_types, hosts, services)
    else:
        cephadm_module.upgrade._validate_upgrade_filters(
            'new_image_name', daemon_types, hosts, services)