import datetime
import json
from contextlib import contextmanager

import pytest

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from cephadm.osd import OSDRemoval

try:
    from typing import Any, List
except ImportError:
    pass

from execnet.gateway_bootstrap import HostNotFound

from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
    NFSServiceSpec, IscsiServiceSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
    HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
from cephadm.module import CephadmOrchestrator


"""
TODOs:
    There is really room for improvement here. I just quickly assembled these tests.
    In general, everything should be tested in Teuthology as well. The reason for
    also testing here is the shorter development round-trip time.
"""

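# How these tests are typically run (an assumption, not documented in this file):
# invoke pytest from the ceph mgr source tree so that `tests.mock` and the
# fixtures in ./fixtures.py are importable, e.g.
#
#     python -m pytest src/pybind/mgr/cephadm/tests/test_cephadm.py
#
# (usually via the mgr tox environment rather than by hand).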

def assert_rm_service(cephadm, srv_name):
    """Remove the named service, then re-apply all stored service specs."""
    assert wait(cephadm, cephadm.remove_service(srv_name)) == [
        f'Removed service {srv_name}']
    cephadm._apply_all_services()


def assert_rm_daemon(cephadm: CephadmOrchestrator, prefix, host):
    """Remove every daemon on `host` whose name starts with `prefix`."""
    dds: List[DaemonDescription] = wait(cephadm, cephadm.list_daemons(host=host))
    d_names = [dd.name() for dd in dds if dd.name().startswith(prefix)]
    assert d_names
    c = cephadm.remove_daemons(d_names)
    [out] = wait(cephadm, c)
    match_glob(out, f"Removed {d_names}* from host '{host}'")


class TestCephadm(object):

    @contextmanager
    def _with_host(self, m, name):
        # type: (CephadmOrchestrator, str) -> None
        wait(m, m.add_host(HostSpec(hostname=name)))
        yield
        wait(m, m.remove_host(name))

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [
            DaemonDescription(daemon_type='mon', daemon_id='a')
        ]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

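    # The `_run_cephadm('...')` patches used throughout the tests below come from
    # .fixtures: they replace CephadmOrchestrator._run_cephadm so that no SSH
    # connection is attempted and the given string is handed back to the
    # orchestrator as the remote command's JSON output ('[]' and '{}' stand in
    # for empty results).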
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with self._with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with self._with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', 'test')]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

            c = cephadm_module.list_daemons()

            def remove_id(dd):
                out = dd.to_json()
                del out['daemon_id']
                return out

            assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
                {
                    'daemon_type': 'mds',
                    'hostname': 'test',
                    'status': 1,
                    'status_desc': 'starting'}
            ]

            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            c = cephadm_module.describe_service()
            out = [o.to_json() for o in wait(cephadm_module, c)]
            expected = [
                {
                    'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
                    'service_id': 'name',
                    'service_name': 'mds.name',
                    'service_type': 'mds',
                    'status': {'running': 1, 'size': 0},
                    'unmanaged': True
                },
                {
                    'placement': {
                        'count': 1,
                        'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
                    },
                    'rgw_realm': 'r',
                    'rgw_zone': 'z',
                    'service_id': 'r.z',
                    'service_name': 'rgw.r.z',
                    'service_type': 'rgw',
                    'status': {'running': 0, 'size': 1}
                }
            ]
            assert out == expected
            assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected

            assert_rm_service(cephadm_module, 'rgw.r.z')
            assert_rm_daemon(cephadm_module, 'mds.name', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.foobar',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

            assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(OrchestratorError, match="Must set public_network config option or specify a CIDR network,"):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

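    # The mocked mon_command below returns a crush-tree-shaped payload (what an
    # `osd tree` query would report); find_destroyed_osds() is expected to map
    # each host to the IDs of its OSDs whose status is 'destroyed'.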
    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [
                {
                    "id": -1,
                    "name": "default",
                    "type": "root",
                    "type_id": 11,
                    "children": [
                        -3
                    ]
                },
                {
                    "id": -3,
                    "name": "host1",
                    "type": "host",
                    "type_id": 1,
                    "pool_weights": {},
                    "children": [
                        0
                    ]
                },
                {
                    "id": 0,
                    "device_class": "hdd",
                    "name": "osd.0",
                    "type": "osd",
                    "type_id": 0,
                    "crush_weight": 0.0243988037109375,
                    "depth": 2,
                    "pool_weights": {},
                    "exists": 1,
                    "status": "destroyed",
                    "reweight": 1,
                    "primary_affinity": 1
                }
            ],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            cephadm_module.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'host_pattern': 'test', 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: prepare is used, due to the hack that is in place
            (['/dev/sda'], False, "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks: batch is used
            (['/dev/sda', '/dev/sdb'], False, "lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"),
            # preview and only one disk: batch has to be used again to generate the preview
            (['/dev/sda'], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks work the same way
            (['/dev/sda', '/dev/sdb'], True, "lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
            out = cephadm_module.driveselection_to_ceph_volume(dg, ds, [], preview)
            assert out in exp_command

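    # preview_drivegroups() is expected to look up the stored drive group spec by
    # service name, prepare the drive selection for each matching host, and run
    # the resulting ceph-volume command; each of those collaborators is mocked
    # out below.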
    @mock.patch("cephadm.module.SpecStore.find")
    @mock.patch("cephadm.module.CephadmOrchestrator.prepare_drivegroup")
    @mock.patch("cephadm.module.CephadmOrchestrator.driveselection_to_ceph_volume")
    @mock.patch("cephadm.module.CephadmOrchestrator._run_ceph_volume_command")
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_preview_drivegroups_str(self, _run_c_v_command, _ds_to_cv, _prepare_dg, _find_store, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
            _find_store.return_value = [dg]
            _prepare_dg.return_value = [('host1', 'ds_dummy')]
            _run_c_v_command.return_value = ("{}", '', 0)
            cephadm_module.preview_drivegroups(drive_group_name='foo')
            _find_store.assert_called_once_with(service_name='foo')
            _prepare_dg.assert_called_once_with(dg)
            _run_c_v_command.assert_called_once()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='osd.0',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    @mock.patch("cephadm.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0', datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == set()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'host1'):
            with self._with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
        json.dumps([
            dict(
                name='rgw.myrgw.myhost.myid',
                style='cephadm',
                fsid='fsid',
                container_id='container_id',
                version='version',
                state='running',
            )
        ])
    ))
    def test_remove_daemon(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)

            c = meth(cephadm_module, spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")

            assert_rm_daemon(cephadm_module, spec.service_name(), 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec('name', pool='pool', namespace='namespace', placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack: we never explicitly created this service, but it now needs to be
            # removed. This is in contrast to the other service types, which are not
            # created automatically when a daemon is added.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_iscsi(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec('name', pool='pool', placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: we never explicitly created this service, but it now needs to be
            # removed. This is in contrast to the other service types, which are not
            # created automatically when a daemon is added.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @pytest.mark.parametrize(
        "spec, meth",
        [
            (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
            (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
            (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
            (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
            (ServiceSpec('node-exporter'), CephadmOrchestrator.apply_node_exporter),
            (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
            (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
            (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.apply_mds),
            (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
            (NFSServiceSpec('name', pool='pool', namespace='namespace'), CephadmOrchestrator.apply_nfs),
            (IscsiServiceSpec('name', pool='pool'), CephadmOrchestrator.apply_iscsi),
        ]
    )
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
            assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]

            assert_rm_service(cephadm_module, spec.service_name())


    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with self._with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert 'Failed to connect to test (test)' in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()