]>
Commit | Line | Data |
---|---|---|
f6b5b4d7 TL |
1 | import json |
2 | from unittest import mock | |
3 | ||
f91f0fd5 TL |
4 | import pytest |
5 | ||
f67539c2 | 6 | from ceph.deployment.service_spec import PlacementSpec, ServiceSpec |
f6b5b4d7 | 7 | from cephadm import CephadmOrchestrator |
f91f0fd5 TL |
8 | from cephadm.upgrade import CephadmUpgrade |
9 | from cephadm.serve import CephadmServe | |
f67539c2 | 10 | from .fixtures import _run_cephadm, wait, with_host, with_service |
f6b5b4d7 | 11 | |
f91f0fd5 | 12 | |
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_upgrade_start(cephadm_module: CephadmOrchestrator):
    """Walk the upgrade lifecycle (start -> status -> pause -> resume -> stop)
    on a single-host cluster and check each step's user-facing message.
    """
    with with_host(cephadm_module, 'test'):
        # start: note the bare image name is normalized to docker.io/<name>
        started = wait(cephadm_module, cephadm_module.upgrade_start('image_id', None))
        assert started == 'Initiating upgrade to docker.io/image_id'

        status = wait(cephadm_module, cephadm_module.upgrade_status())
        assert status.target_image == 'docker.io/image_id'

        paused = wait(cephadm_module, cephadm_module.upgrade_pause())
        assert paused == 'Paused upgrade to docker.io/image_id'

        resumed = wait(cephadm_module, cephadm_module.upgrade_resume())
        assert resumed == 'Resumed upgrade to docker.io/image_id'

        stopped = wait(cephadm_module, cephadm_module.upgrade_stop())
        assert stopped == 'Stopped upgrade to docker.io/image_id'
f6b5b4d7 TL |
27 | |
28 | ||
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@pytest.mark.parametrize("use_repo_digest",
                         [
                             False,
                             True
                         ])
def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
    """Drive a full upgrade cycle on a two-host cluster.

    Starts an upgrade to 'to_image', lets the upgrade loop pull the image
    metadata and redeploy the mgr daemons, then verifies the final global
    'container_image' config value: the repo digest when use_repo_digest
    is set, the normalized image name otherwise.
    """
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):
            cephadm_module.set_container_image('global', 'from_image')
            cephadm_module.use_repo_digest = use_repo_digest
            with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*', count=2)), CephadmOrchestrator.apply_mgr, ''),\
                    mock.patch("cephadm.module.CephadmOrchestrator.lookup_release_name",
                               return_value='foo'),\
                    mock.patch("cephadm.module.CephadmOrchestrator.version",
                               new_callable=mock.PropertyMock) as version_mock,\
                    mock.patch("cephadm.module.CephadmOrchestrator.get",
                               return_value={
                                   # capture fields in both mon and osd maps
                                   "require_osd_release": "pacific",
                                   "min_mon_release": 16,
                               }):
                version_mock.return_value = 'ceph version 18.2.1 (somehash)'
                assert wait(cephadm_module, cephadm_module.upgrade_start(
                    'to_image', None)) == 'Initiating upgrade to docker.io/to_image'

                assert wait(cephadm_module, cephadm_module.upgrade_status()
                            ).target_image == 'docker.io/to_image'

                def _versions_mock(cmd):
                    # every mgr daemon reports one (old) version, so the
                    # upgrade loop sees work to do
                    return json.dumps({
                        'mgr': {
                            'ceph version 1.2.3 (asdf) blah': 1
                        }
                    })

                cephadm_module._mon_command_mock_versions = _versions_mock

                # first pass: pulls the target image metadata
                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
                    'image_id': 'image_id',
                    'repo_digests': ['to_image@repo_digest'],
                    'ceph_version': 'ceph version 18.2.3 (hash)',
                }))):

                    cephadm_module.upgrade._do_upgrade()

                # FIX: the original asserted `cephadm_module.upgrade_status
                # is not None`, which checks a bound method and is vacuously
                # true.  Assert on the actual in-progress upgrade state
                # (the same attribute test_upgrade_state_null inspects).
                assert cephadm_module.upgrade.upgrade_state is not None

                # make the daemon inventory report the mgr as already
                # running on the target image/digest
                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
                    json.dumps([
                        dict(
                            name=list(cephadm_module.cache.daemons['host1'].keys())[0],
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            container_image_id='image_id',
                            container_image_digests=['to_image@repo_digest'],
                            deployed_by=['to_image@repo_digest'],
                            version='version',
                            state='running',
                        )
                    ])
                )):
                    CephadmServe(cephadm_module)._refresh_hosts_and_daemons()

                # second pass: everything is on the target image, so the
                # upgrade completes and writes the final config
                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(json.dumps({
                    'image_id': 'image_id',
                    'repo_digests': ['to_image@repo_digest'],
                    'ceph_version': 'ceph version 18.2.3 (hash)',
                }))):
                    cephadm_module.upgrade._do_upgrade()

                _, image, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': 'global',
                    'key': 'container_image',
                })
                if use_repo_digest:
                    assert image == 'to_image@repo_digest'
                else:
                    assert image == 'docker.io/to_image'
f91f0fd5 | 110 | |
f6b5b4d7 | 111 | |
f91f0fd5 TL |
def test_upgrade_state_null(cephadm_module: CephadmOrchestrator):
    """Regression test for https://tracker.ceph.com/issues/47580.

    A literal JSON 'null' persisted as the upgrade state must deserialize
    to "no upgrade in progress" (upgrade_state is None) rather than
    crashing CephadmUpgrade's constructor.
    """
    cephadm_module.set_store('upgrade_state', 'null')
    # FIX: the original constructed CephadmUpgrade twice and discarded the
    # first instance; one construction is enough to cover both the
    # "doesn't raise" and the "state is None" aspects of the regression.
    upgrade = CephadmUpgrade(cephadm_module)
    assert upgrade.upgrade_state is None