# ceph/src/pybind/mgr/orchestrator/tests/test_orchestrator.py (ceph quincy 17.2.1)
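# Unit tests for the orchestrator mgr module: resource (de)serialization,
# CLI command handling, and output formatting.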
import json
import textwrap

import pytest
import yaml

from ceph.deployment.hostspec import HostSpec
from ceph.deployment.inventory import Devices, Device
from ceph.deployment.service_spec import ServiceSpec
from ceph.deployment import inventory
from ceph.utils import datetime_now
from mgr_module import HandleCommandResult

from test_orchestrator import TestOrchestrator as _TestOrchestrator

from orchestrator import InventoryHost, DaemonDescription, ServiceDescription, DaemonDescriptionStatus, OrchResult
from orchestrator import OrchestratorValidationError
from orchestrator.module import to_format, Format, OrchestratorCli, preview_table_osd
from unittest import mock


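# Helper: round-trip a resource class through JSON and, if `extra` is given,
# verify that unexpected extra fields are rejected.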
def _test_resource(data, resource_class, extra=None):
    # ensure we can deserialize and serialize
    rsc = resource_class.from_json(data)
    assert rsc.to_json() == resource_class.from_json(rsc.to_json()).to_json()

    if extra:
        # unexpected extra data should fail validation
        data_copy = data.copy()
        data_copy.update(extra)
        with pytest.raises(OrchestratorValidationError):
            resource_class.from_json(data_copy)


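# An InventoryHost (and each of its devices) round-trips through JSON;
# incomplete host entries raise OrchestratorValidationError.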
def test_inventory():
    json_data = {
        'name': 'host0',
        'addr': '1.2.3.4',
        'devices': [
            {
                'sys_api': {
                    'rotational': '1',
                    'size': 1024,
                },
                'path': '/dev/sda',
                'available': False,
                'rejected_reasons': [],
                'lvs': []
            }
        ]
    }
    _test_resource(json_data, InventoryHost, {'abc': False})
    for device in json_data['devices']:
        _test_resource(device, inventory.Device)

    json_data = [{}, {'name': 'host0', 'addr': '1.2.3.4'}, {'devices': []}]
    for data in json_data:
        with pytest.raises(OrchestratorValidationError):
            InventoryHost.from_json(data)


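# A DaemonDescription round-trips through JSON; a status of -1 deserializes
# to DaemonDescriptionStatus.error.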
def test_daemon_description():
    json_data = {
        'hostname': 'test',
        'daemon_type': 'mon',
        'daemon_id': 'a',
        'status': -1,
    }
    _test_resource(json_data, DaemonDescription, {'abc': False})

    dd = DaemonDescription.from_json(json_data)
    assert dd.status.value == DaemonDescriptionStatus.error.value


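# apply() returns one result per submitted spec, even for duplicates.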
def test_apply():
    to = _TestOrchestrator('', 0, 0)
    completion = to.apply([
        ServiceSpec(service_type='nfs', service_id='foo'),
        ServiceSpec(service_type='nfs', service_id='foo'),
        ServiceSpec(service_type='nfs', service_id='foo'),
    ])
    res = '<NFSServiceSpec for service_name=nfs.foo>'
    assert completion.result == [res, res, res]


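# DaemonDescription and ServiceDescription serialize back to the exact YAML
# they were loaded from, via both the YAML and JSON formatters.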
def test_yaml():
    y = """daemon_type: crash
daemon_id: ubuntu
daemon_name: crash.ubuntu
hostname: ubuntu
status: 1
status_desc: starting
is_active: false
events:
- 2020-06-10T10:08:22.933241Z daemon:crash.ubuntu [INFO] "Deployed crash.ubuntu on
  host 'ubuntu'"
---
service_type: crash
service_name: crash
placement:
  host_pattern: '*'
status:
  container_image_id: 74803e884bea289d2d2d3ebdf6d37cd560499e955595695b1390a89800f4e37a
  container_image_name: docker.io/ceph/daemon-base:latest-master-devel
  created: '2020-06-10T10:37:31.051288Z'
  last_refresh: '2020-06-10T10:57:40.715637Z'
  running: 1
  size: 1
events:
- 2020-06-10T10:37:31.139159Z service:crash [INFO] "service was created"
"""
    types = (DaemonDescription, ServiceDescription)

    for doc, cls in zip(y.split('---\n'), types):
        data = yaml.safe_load(doc)
        obj = cls.from_json(data)

        assert to_format(obj, Format.yaml, False, cls) == doc
        assert to_format([obj], Format.yaml, True, cls) == doc

        j = json.loads(to_format(obj, Format.json, False, cls))
        assert to_format(cls.from_json(j), Format.yaml, False, cls) == doc


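# OrchestratorEvent round-trips through its JSON encoding, including
# multi-line messages.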
def test_event_multiline():
    from .._interface import OrchestratorEvent
    e = OrchestratorEvent(datetime_now(), 'service', 'subject', 'ERROR', 'message')
    assert OrchestratorEvent.from_json(e.to_json()) == e

    e = OrchestratorEvent(datetime_now(), 'service',
                          'subject', 'ERROR', 'multiline\nmessage')
    assert OrchestratorEvent.from_json(e.to_json()) == e


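# Without a configured backend, orch commands fail with a helpful error.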
def test_handle_command():
    cmd = {
        'prefix': 'orch daemon add',
        'daemon_type': 'mon',
        'placement': 'smithi044:[v2:172.21.15.44:3301,v1:172.21.15.44:6790]=c',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    assert r == HandleCommandResult(
        retval=-2, stdout='', stderr='No orchestrator configured (try `ceph orch set backend`)')


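# Canned describe_service() result used by test_orch_ls below.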
r = OrchResult([ServiceDescription(spec=ServiceSpec(service_type='osd'), running=123)])


@mock.patch("orchestrator.OrchestratorCli.describe_service", return_value=r)
def test_orch_ls(_describe_service):
    cmd = {
        'prefix': 'orch ls',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    out = 'NAME PORTS RUNNING REFRESHED AGE PLACEMENT \n' \
          'osd 123 - - '
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')

    cmd = {
        'prefix': 'orch ls',
        'format': 'yaml',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    out = textwrap.dedent("""
        service_type: osd
        service_name: osd
        spec:
          filter_logic: AND
          objectstore: bluestore
        status:
          running: 123
          size: 0
        """).lstrip()
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')


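# Canned list_daemons() result: osd ids given in lexicographic order
# (1, 10, 2) so the natural sort in `orch ps` is observable.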
dlist = OrchResult([DaemonDescription(daemon_type="osd", daemon_id="1"), DaemonDescription(
    daemon_type="osd", daemon_id="10"), DaemonDescription(daemon_type="osd", daemon_id="2")])


@mock.patch("orchestrator.OrchestratorCli.list_daemons", return_value=dlist)
def test_orch_ps(_describe_service):

    # Ensure natural sorting on daemon names (osd.1, osd.2, osd.10)
    cmd = {
        'prefix': 'orch ps'
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    out = 'NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID \n'\
          'osd.1 <unknown> unknown - - - - <unknown> <unknown> \n'\
          'osd.2 <unknown> unknown - - - - <unknown> <unknown> \n'\
          'osd.10 <unknown> unknown - - - - <unknown> <unknown> '
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')


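# Canned get_hosts() result: hostnames whose lexicographic order
# (1, 10, 2) differs from natural order, for `orch host ls`.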
hlist = OrchResult([HostSpec("ceph-node-1"), HostSpec("ceph-node-2"), HostSpec("ceph-node-10")])


@mock.patch("orchestrator.OrchestratorCli.get_hosts", return_value=hlist)
def test_orch_host_ls(_describe_service):

    # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
    cmd = {
        'prefix': 'orch host ls'
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    out = 'HOST ADDR LABELS STATUS \n'\
          'ceph-node-1 ceph-node-1 \n'\
          'ceph-node-2 ceph-node-2 \n'\
          'ceph-node-10 ceph-node-10 \n'\
          '3 hosts in cluster'
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')


def test_orch_device_ls():
    devices = Devices([Device("/dev/vdb", available=True)])
    ilist = OrchResult([InventoryHost("ceph-node-1", devices=devices), InventoryHost("ceph-node-2",
                       devices=devices), InventoryHost("ceph-node-10", devices=devices)])

    with mock.patch("orchestrator.OrchestratorCli.get_inventory", return_value=ilist):
        # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
        cmd = {
            'prefix': 'orch device ls'
        }
        m = OrchestratorCli('orchestrator', 0, 0)
        r = m._handle_command(None, cmd)
        out = 'HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS \n'\
              'ceph-node-1 /dev/vdb unknown None 0 Yes 0s ago \n'\
              'ceph-node-2 /dev/vdb unknown None 0 Yes 0s ago \n'\
              'ceph-node-10 /dev/vdb unknown None 0 Yes 0s ago '
        assert r == HandleCommandResult(retval=0, stdout=out, stderr='')


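# Smoke test: preview_table_osd() renders a representative OSD preview
# payload without raising.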
def test_preview_table_osd_smoke():
    data = [
        {
            'service_type': 'osd',
            'data':
            {
                'foo host':
                [
                    {
                        'osdspec': 'foo',
                        'error': '',
                        'data':
                        [
                            {
                                "block_db": "/dev/nvme0n1",
                                "block_db_size": "66.67 GB",
                                "data": "/dev/sdb",
                                "data_size": "300.00 GB",
                                "encryption": "None"
                            },
                            {
                                "block_db": "/dev/nvme0n1",
                                "block_db_size": "66.67 GB",
                                "data": "/dev/sdc",
                                "data_size": "300.00 GB",
                                "encryption": "None"
                            },
                            {
                                "block_db": "/dev/nvme0n1",
                                "block_db_size": "66.67 GB",
                                "data": "/dev/sdd",
                                "data_size": "300.00 GB",
                                "encryption": "None"
                            }
                        ]
                    }
                ]
            }
        }
    ]
    preview_table_osd(data)