from textwrap import dedent

import json
import urllib.parse

import pytest
import yaml

from mgr_util import build_url

from unittest.mock import MagicMock, call, patch, ANY

from cephadm.serve import CephadmServe
from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RgwService, \
    RbdMirrorService, CrashService, CephadmDaemonDeploySpec
from cephadm.services.iscsi import IscsiService
from cephadm.services.nfs import NFSService
from cephadm.services.osd import OSDService
from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
    NodeExporterService, LokiService, PromtailService
from cephadm.module import CephadmOrchestrator
from ceph.deployment.service_spec import IscsiServiceSpec, MonitoringSpec, AlertManagerSpec, \
    ServiceSpec, RGWSpec, GrafanaSpec, SNMPGatewaySpec, IngressSpec, PlacementSpec, TracingSpec, \
    PrometheusSpec, CephExporterSpec, NFSServiceSpec
from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect

from ceph.utils import datetime_now

from orchestrator import OrchestratorError
from orchestrator._interface import DaemonDescription

from typing import Dict, List

grafana_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQDIZSujNBlKaLJzmvntjukjMA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzEzMTE0NzA3WhcN\nMzIwNzEwMTE0NzA3WjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyMe4DMA+MeYK7BHZMHB\nq7zjliEOcNgxomjU8qbf5USF7Mqrf6+/87XWqj4pCyAW8x0WXEr6A56a+cmBVmt+\nqtWDzl020aoId6lL5EgLLn6/kMDCCJLq++Lg9cEofMSvcZh+lY2f+1p+C+00xent\nrLXvXGOilAZWaQfojT2BpRnNWWIFbpFwlcKrlg2G0cFjV5c1m6a0wpsQ9JHOieq0\nSvwCixajwq3CwAYuuiU1wjI4oJO4Io1+g8yB3nH2Mo/25SApCxMXuXh4kHLQr/T4\n4hqisvG4uJYgKMcSIrWj5o25mclByGi1UI/kZkCUES94i7Z/3ihx4Bad0AMs/9tw\nFwIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQAf+pwz7Gd7mDwU2LY0TQXsK6/8KGzh\nHuX+ErOb8h5cOAbvCnHjyJFWf6gCITG98k9nxU9NToG0WYuNm/max1y/54f0dtxZ\npUo6KSNl3w6iYCfGOeUIj8isi06xMmeTgMNzv8DYhDt+P2igN6LenqWTVztogkiV\nxQ5ZJFFLEw4sN0CXnrZX3t5ruakxLXLTLKeE0I91YJvjClSBGkVJq26wOKQNHMhx\npWxeydQ5EgPZY+Aviz5Dnxe8aB7oSSovpXByzxURSabOuCK21awW5WJCGNpmqhWK\nZzACBDEstccj57c4OGV0eayHJRsluVr2e9NHRINZA3qdB37e6gsI1xHo\n-----END CERTIFICATE-----\n"""
grafana_key = """-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLIx7gMwD4x5gr\nsEdkwcGrvOOWIQ5w2DGiaNTypt/lRIXsyqt/r7/ztdaqPikLIBbzHRZcSvoDnpr5\nyYFWa36q1YPOXTbRqgh3qUvkSAsufr+QwMIIkur74uD1wSh8xK9xmH6VjZ/7Wn4L\n7TTF6e2ste9cY6KUBlZpB+iNPYGlGc1ZYgVukXCVwquWDYbRwWNXlzWbprTCmxD0\nkc6J6rRK/AKLFqPCrcLABi66JTXCMjigk7gijX6DzIHecfYyj/blICkLExe5eHiQ\nctCv9PjiGqKy8bi4liAoxxIitaPmjbmZyUHIaLVQj+RmQJQRL3iLtn/eKHHgFp3Q\nAyz/23AXAgMBAAECggEAVoTB3Mm8azlPlaQB9GcV3tiXslSn+uYJ1duCf0sV52dV\nBzKW8s5fGiTjpiTNhGCJhchowqxoaew+o47wmGc2TvqbpeRLuecKrjScD0GkCYyQ\neM2wlshEbz4FhIZdgS6gbuh9WaM1dW/oaZoBNR5aTYo7xYTmNNeyLA/jO2zr7+4W\n5yES1lMSBXpKk7bDGKYY4bsX2b5RLr2Grh2u2bp7hoLABCEvuu8tSQdWXLEXWpXo\njwmV3hc6tabypIa0mj2Dmn2Dmt1ppSO0AZWG/WAizN3f4Z0r/u9HnbVrVmh0IEDw\n3uf2LP5o3msG9qKCbzv3lMgt9mMr70HOKnJ8ohMSKQKBgQDLkNb+0nr152HU9AeJ\nvdz8BeMxcwxCG77iwZphZ1HprmYKvvXgedqWtS6FRU+nV6UuQoPUbQxJBQzrN1Qv\nwKSlOAPCrTJgNgF/RbfxZTrIgCPuK2KM8I89VZv92TSGi362oQA4MazXC8RAWjoJ\nSu1/PHzK3aXOfVNSLrOWvIYeZQKBgQD/dgT6RUXKg0UhmXj7ExevV+c7oOJTDlMl\nvLngrmbjRgPO9VxLnZQGdyaBJeRngU/UXfNgajT/MU8B5fSKInnTMawv/tW7634B\nw3v6n5kNIMIjJmENRsXBVMllDTkT9S7ApV+VoGnXRccbTiDapBThSGd0wri/CuwK\nNWK1YFOeywKBgEDyI/XG114PBUJ43NLQVWm+wx5qszWAPqV/2S5MVXD1qC6zgCSv\nG9NLWN1CIMimCNg6dm7Wn73IM7fzvhNCJgVkWqbItTLG6DFf3/DPODLx1wTMqLOI\nqFqMLqmNm9l1Nec0dKp5BsjRQzq4zp1aX21hsfrTPmwjxeqJZdioqy2VAoGAXR5X\nCCdSHlSlUW8RE2xNOOQw7KJjfWT+WAYoN0c7R+MQplL31rRU7dpm1bLLRBN11vJ8\nMYvlT5RYuVdqQSP6BkrX+hLJNBvOLbRlL+EXOBrVyVxHCkDe+u7+DnC4epbn+N8P\nLYpwqkDMKB7diPVAizIKTBxinXjMu5fkKDs5n+sCgYBbZheYKk5M0sIxiDfZuXGB\nkf4mJdEkTI1KUGRdCwO/O7hXbroGoUVJTwqBLi1tKqLLarwCITje2T200BYOzj82\nqwRkCXGtXPKnxYEEUOiFx9OeDrzsZV00cxsEnX0Zdj+PucQ/J3Cvd0dWUspJfLHJ\n39gnaegswnz9KMQAvzKFdg==\n-----END PRIVATE KEY-----\n"""
def get_addr(self, name: str) -> str:
self.set_mon_crush_locations: Dict[str, List[str]] = {}
self.check_mon_command = MagicMock(side_effect=self._check_mon_command)
self.mon_command = MagicMock(side_effect=self._check_mon_command)
self.template = MagicMock()
self.log = MagicMock()
self.inventory = FakeInventory()

def _check_mon_command(self, cmd_dict, inbuf=None):
    prefix = cmd_dict.get('prefix')
    if prefix == 'get-cmd':
        return 0, self.config, ''
    if prefix == 'set-cmd':
        self.config = cmd_dict.get('value')
        return 0, 'value set', ''
    if prefix in ['auth get']:
        return 0, '[foo]\nkeyring = asdf\n', ''
    if prefix == 'quorum_status':
        # actual quorum status output from testing
        # note in this output all of the mons have blank crush locations
        return 0, """{"election_epoch": 14, "quorum": [0, 1, 2], "quorum_names": ["vm-00", "vm-01", "vm-02"], "quorum_leader_name": "vm-00", "quorum_age": 101, "features": {"quorum_con": "4540138322906710015", "quorum_mon": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"]}, "monmap": {"epoch": 3, "fsid": "9863e1b8-6f24-11ed-8ad8-525400c13ad2", "modified": "2022-11-28T14:00:29.972488Z", "created": "2022-11-28T13:57:55.847497Z", "min_mon_release": 18, "min_mon_release_name": "reef", "election_strategy": 1, "disallowed_leaders: ": "", "stretch_mode": false, "tiebreaker_mon": "", "features": {"persistent": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"], "optional": []}, "mons": [{"rank": 0, "name": "vm-00", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.61:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.61:6789", "nonce": 0}]}, "addr": "192.168.122.61:6789/0", "public_addr": "192.168.122.61:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 1, "name": "vm-01", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.63:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.63:6789", "nonce": 0}]}, "addr": "192.168.122.63:6789/0", "public_addr": "192.168.122.63:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 2, "name": "vm-02", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.82:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.82:6789", "nonce": 0}]}, "addr": "192.168.122.82:6789/0", "public_addr": "192.168.122.82:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}]}}""", ''
    if prefix == 'mon set_location':
        self.set_mon_crush_locations[cmd_dict.get('name')] = cmd_dict.get('args')
    return -1, '', 'error'

def get_minimal_ceph_conf(self) -> str:

def get_mgr_ip(self) -> str:


class TestCephadmService:
    def test_set_service_url_on_dashboard(self):
        # pylint: disable=protected-access
        service_url = 'http://svc:1000'
        service = GrafanaService(mgr)
        service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
        assert mgr.config == service_url

        # set-cmd should not be called if value doesn't change
        mgr.check_mon_command.reset_mock()
        service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
        mgr.check_mon_command.assert_called_once_with({'prefix': 'get-cmd'})
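
    # Helper: build one instance of every cephadm service type, keyed by daemon
    # type, so the auth-entity test below can exercise them all uniformly.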
    def _get_services(self, mgr):
        osd_service = OSDService(mgr)
        nfs_service = NFSService(mgr)
        mon_service = MonService(mgr)
        mgr_service = MgrService(mgr)
        mds_service = MdsService(mgr)
        rgw_service = RgwService(mgr)
        rbd_mirror_service = RbdMirrorService(mgr)
        grafana_service = GrafanaService(mgr)
        alertmanager_service = AlertmanagerService(mgr)
        prometheus_service = PrometheusService(mgr)
        node_exporter_service = NodeExporterService(mgr)
        loki_service = LokiService(mgr)
        promtail_service = PromtailService(mgr)
        crash_service = CrashService(mgr)
        iscsi_service = IscsiService(mgr)

        cephadm_services = {
            'mon': mon_service,
            'mgr': mgr_service,
            'osd': osd_service,
            'mds': mds_service,
            'rgw': rgw_service,
            'rbd-mirror': rbd_mirror_service,
            'nfs': nfs_service,
            'grafana': grafana_service,
            'alertmanager': alertmanager_service,
            'prometheus': prometheus_service,
            'node-exporter': node_exporter_service,
            'loki': loki_service,
            'promtail': promtail_service,
            'crash': crash_service,
            'iscsi': iscsi_service,
        }
        return cephadm_services

    def test_get_auth_entity(self):
        cephadm_services = self._get_services(mgr)

        for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1")

        assert "client.crash.host" == \
            cephadm_services["crash"].get_auth_entity("id1", "host")
        with pytest.raises(OrchestratorError):
            cephadm_services["crash"].get_auth_entity("id1", "")
            cephadm_services["crash"].get_auth_entity("id1")

        assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
        assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
        assert "mon." == cephadm_services["mon"].get_auth_entity("id1")

        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "host")
        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "")
        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1")

        for daemon_type in ["osd", "mds"]:
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1")

        # services based on CephadmService shouldn't have get_auth_entity
        with pytest.raises(AttributeError):
            for daemon_type in ['grafana', 'alertmanager', 'prometheus', 'node-exporter', 'loki', 'promtail']:
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
                cephadm_services[daemon_type].get_auth_entity("id1", "")
                cephadm_services[daemon_type].get_auth_entity("id1")
class TestISCSIService:

    iscsi_service = IscsiService(mgr)

    iscsi_spec = IscsiServiceSpec(service_type='iscsi', service_id="a")
    iscsi_spec.daemon_type = "iscsi"
    iscsi_spec.daemon_id = "a"
    iscsi_spec.spec = MagicMock()
    iscsi_spec.spec.daemon_type = "iscsi"
    iscsi_spec.spec.ssl_cert = ''
    iscsi_spec.api_user = "user"
    iscsi_spec.api_password = "password"
    iscsi_spec.api_port = 5000
    iscsi_spec.api_secure = False
    iscsi_spec.ssl_cert = "cert"
    iscsi_spec.ssl_key = "key"

    mgr.spec_store = MagicMock()
    mgr.spec_store.all_specs.get.return_value = iscsi_spec

    def test_iscsi_client_caps(self):
        iscsi_daemon_spec = CephadmDaemonDeploySpec(
            host='host', daemon_id='a', service_name=self.iscsi_spec.service_name())

        self.iscsi_service.prepare_create(iscsi_daemon_spec)

        expected_caps = ['mon',
                         'profile rbd, allow command "osd blocklist", allow command "config-key get" with "key" prefix "iscsi/"',
                         'mgr', 'allow command "service status"',

        expected_call = call({'prefix': 'auth get-or-create',
                              'entity': 'client.iscsi.a',
                              'caps': expected_caps})
        expected_call2 = call({'prefix': 'auth caps',
                               'entity': 'client.iscsi.a',
                               'caps': expected_caps})
        expected_call3 = call({'prefix': 'auth get',
                               'entity': 'client.iscsi.a'})

        assert expected_call in self.mgr.mon_command.mock_calls
        assert expected_call2 in self.mgr.mon_command.mock_calls
        assert expected_call3 in self.mgr.mon_command.mock_calls

    @patch('cephadm.utils.resolve_ip')
    def test_iscsi_dashboard_config(self, mock_resolve_ip):
        self.mgr.check_mon_command = MagicMock()
        self.mgr.check_mon_command.return_value = ('', '{"gateways": {}}', '')

        # Case 1: use IPV4 address
        id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
                                daemon_id="a", ip='192.168.1.1')
        mock_resolve_ip.return_value = '192.168.1.1'

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'http://user:password@192.168.1.1:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls

        # Case 2: use IPV6 address
        self.mgr.check_mon_command.reset_mock()

        id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
                                daemon_id="a", ip='FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')
        mock_resolve_ip.return_value = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'http://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls

        # Case 3: use IPV6 address, secure protocol
        self.mgr.check_mon_command.reset_mock()

        self.iscsi_spec.api_secure = True

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'https://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
263 @patch("cephadm.serve.CephadmServe._run_cephadm")
264 @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
265 @patch("cephadm.services.iscsi.IscsiService.get_trusted_ips")
266 def test_iscsi_config(self
, _get_trusted_ips
, _get_name
, _run_cephadm
, cephadm_module
: CephadmOrchestrator
):
268 iscsi_daemon_id
= 'testpool.test.qwert'
269 trusted_ips
= '1.1.1.1,2.2.2.2'
271 api_user
= 'test-user'
272 api_password
= 'test-password'
274 _run_cephadm
.side_effect
= async_side_effect(('{}', '', 0))
275 _get_name
.return_value
= iscsi_daemon_id
276 _get_trusted_ips
.return_value
= trusted_ips
278 iscsi_gateway_conf
= f
"""# This file is generated by cephadm.
280 cluster_client_name = client.iscsi.{iscsi_daemon_id}
282 trusted_ip_list = {trusted_ips}
284 api_port = {api_port}
285 api_user = {api_user}
286 api_password = {api_password}
289 log_to_stderr_prefix = debug
290 log_to_file = False"""
292 with
with_host(cephadm_module
, 'test'):
293 with
with_service(cephadm_module
, IscsiServiceSpec(service_id
=pool
,
296 api_password
=api_password
,
298 trusted_ip_list
=trusted_ips
)):
299 _run_cephadm
.assert_called_with(
301 f
'iscsi.{iscsi_daemon_id}',
304 '--name', f
'iscsi.{iscsi_daemon_id}',
305 '--meta-json', f
'{"{"}"service_name": "iscsi.{pool}", "ports": [{api_port}], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null{"}"}',
306 '--config-json', '-', '--tcp-ports', '3456'
308 stdin
=json
.dumps({"config": "", "keyring": f
"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n", "files": {"iscsi-gateway.cfg": iscsi_gateway_conf
}}),
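

# Tests for the monitoring stack services (alertmanager, prometheus, loki,
# promtail, grafana): each test deploys the service against a mocked
# _run_cephadm and checks the generated configuration files and tcp ports.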
class TestMonitoring:
    def _get_config(self, url: str) -> str:
        return f"""
        # This file is generated by cephadm.
        # See https://prometheus.io/docs/alerting/configuration/ for documentation.
            insecure_skip_verify: true
          - group_by: ['alertname']
            receiver: 'ceph-dashboard'
        - name: 'ceph-dashboard'
          - url: '{url}/api/prometheus_receiver'
        """

    @pytest.mark.parametrize(
        "dashboard_url,expected_yaml_url",
        [
            ("http://[::1]:8080", "http://localhost:8080"),
            (
                "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
                "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
            ),
            (
                "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
                "http://mgr.fqdn.test:8080",
            ),
            (
                "http://192.168.0.123:8080",
                "http://192.168.0.123:8080",
            ),
            (
                "http://192.168.0.123:8080",
                "http://mgr.fqdn.test:8080",
            ),
        ],
    )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    @patch("socket.getfqdn")
    def test_alertmanager_config(
        self,
        mock_getfqdn,
        mock_get,
        _run_cephadm,
        cephadm_module: CephadmOrchestrator,
        dashboard_url,
        expected_yaml_url,
    ):
        _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
        mock_get.return_value = {"services": {"dashboard": dashboard_url}}
        purl = urllib.parse.urlparse(expected_yaml_url)
        mock_getfqdn.return_value = purl.hostname

        with with_host(cephadm_module, "test"):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config(expected_yaml_url)).lstrip()
                _run_cephadm.assert_called_with(
                    ('{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    {"files": {"alertmanager.yml": y}, "peers": []}

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("socket.getfqdn")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
    @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password')
    def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        _get_fqdn.return_value = fqdn

        def gen_cert(host, addr):
            return ('mycert', 'mykey')

        def get_root_cert():
            return 'my_root_cert'

        with with_host(cephadm_module, 'test'):
            cephadm_module.secure_monitoring_stack = True
            cephadm_module.alertmanager_web_password = 'fake_password'
            cephadm_module.alertmanager_web_user = 'admin'
            cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert)
            cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert)
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(f"""
                # This file is generated by cephadm.
                # See https://prometheus.io/docs/alerting/configuration/ for documentation.
                    ca_file: root_cert.pem
                  - group_by: ['alertname']
                    receiver: 'ceph-dashboard'
                - name: 'ceph-dashboard'
                  - url: 'http://{fqdn}:8080/api/prometheus_receiver'
                """).lstrip()

                web_config = dedent("""
                    cert_file: alertmanager.crt
                    key_file: alertmanager.key
                      admin: fake_password""").lstrip()

                _run_cephadm.assert_called_with(
                    '--name', 'alertmanager.test',
                    '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
                    '--config-json', '-', '--tcp-ports', '9093 9094'
                        "alertmanager.yml": y,
                        'alertmanager.crt': 'mycert',
                        'alertmanager.key': 'mykey',
                        'web.yml': web_config,
                        'root_cert.pem': 'my_root_cert'
                        'web_config': '/etc/alertmanager/web.yml'

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
    def test_prometheus_config_security_disabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
                    with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \
                    with_service(cephadm_module, s) as _, \
                    with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
                    with_service(cephadm_module, IngressSpec(service_id='ingress',
                                                             monitor_user='admin',
                                                             monitor_password='12345',
                                                             keepalived_password='12345',
                                                             virtual_ip="1.2.3.4/32",
                                                             backend_service='rgw.foo')) as _, \
                    with_service(cephadm_module, PrometheusSpec('prometheus')) as _:

                y = dedent("""
                # This file is generated by cephadm.
                  evaluation_interval: 10s
                  - /etc/prometheus/alerting/*
                    - url: http://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
                    - url: http://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
                    - url: http://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
                  - job_name: 'haproxy'
                    - url: http://[::1]:8765/sd/prometheus/sd-config?service=haproxy
                  - job_name: 'ceph-exporter'
                    - url: http://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
                """).lstrip()

                _run_cephadm.assert_called_with(
                    '--name', 'prometheus.test',
                    ('{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '9095'
                    stdin=json.dumps({"files": {"prometheus.yml": y,
                                                "/etc/prometheus/alerting/custom_alerts.yml": ""},
                                      'retention_time': '15d',
                                      'retention_size': '0'}),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
    @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password')
    def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')

        def gen_cert(host, addr):
            return ('mycert', 'mykey')

        with with_host(cephadm_module, 'test'):
            cephadm_module.secure_monitoring_stack = True
            cephadm_module.http_server.service_discovery.username = 'admin'
            cephadm_module.http_server.service_discovery.password = 'fake_password'
            cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(
                side_effect=gen_cert)
            with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
                    with_service(cephadm_module, s) as _, \
                    with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
                    with_service(cephadm_module, IngressSpec(service_id='ingress',
                                                             monitor_user='admin',
                                                             monitor_password='12345',
                                                             keepalived_password='12345',
                                                             virtual_ip="1.2.3.4/32",
                                                             backend_service='rgw.foo')) as _, \
                    with_service(cephadm_module, PrometheusSpec('prometheus')) as _:

                web_config = dedent("""
                    cert_file: prometheus.crt
                    key_file: prometheus.key
                      admin: fake_password""").lstrip()

                y = dedent("""
                # This file is generated by cephadm.
                  evaluation_interval: 10s
                  - /etc/prometheus/alerting/*
                      ca_file: root_cert.pem
                    - url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
                      password: fake_password
                      ca_file: root_cert.pem
                      ca_file: mgr_prometheus_cert.pem
                    - url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
                      password: fake_password
                      ca_file: root_cert.pem
                      ca_file: root_cert.pem
                    - url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
                      password: fake_password
                      ca_file: root_cert.pem
                  - job_name: 'haproxy'
                      ca_file: root_cert.pem
                    - url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy
                      password: fake_password
                      ca_file: root_cert.pem
                  - job_name: 'ceph-exporter'
                      ca_file: root_cert.pem
                    - url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
                      password: fake_password
                      ca_file: root_cert.pem
                """).lstrip()

                _run_cephadm.assert_called_with(
                    '--name', 'prometheus.test',
                    '{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
                    '--config-json', '-',
                    '--tcp-ports', '9095'
                        'mgr_prometheus_cert.pem': '',
                        'web.yml': web_config,
                        'prometheus.crt': 'mycert',
                        'prometheus.key': 'mykey',
                        "/etc/prometheus/alerting/custom_alerts.yml": "",
                      'retention_time': '15d',
                      'retention_size': '0',
                      'web_config': '/etc/prometheus/web.yml'}),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, MonitoringSpec('loki')) as _:

                y = dedent("""
                # This file is generated by cephadm.
                  http_listen_port: 3100
                  grpc_listen_port: 8080
                  path_prefix: /tmp/loki
                    chunks_directory: /tmp/loki/chunks
                    rules_directory: /tmp/loki/rules
                  replication_factor: 1
                      instance_addr: 127.0.0.1
                      store: boltdb-shipper
                      object_store: filesystem
                      period: 24h""").lstrip()

                _run_cephadm.assert_called_with(
                    '--name', 'loki.test',
                    ('{"service_name": "loki", "ports": [3100], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '3100'
                    stdin=json.dumps({"files": {"loki.yml": y}}),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, MonitoringSpec('promtail')) as _:

                y = dedent("""
                # This file is generated by cephadm.
                  http_listen_port: 9080
                    filename: /tmp/positions.yaml
                  - url: http://:3100/loki/api/v1/push
                      __path__: /var/log/ceph/**/*.log""").lstrip()

                _run_cephadm.assert_called_with(
                    '--name', 'promtail.test',
                    ('{"service_name": "promtail", "ports": [9080], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '9080'
                    stdin=json.dumps({"files": {"promtail.yml": y}}),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
    @patch("cephadm.services.monitoring.verify_tls", lambda *_: None)
    def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(("{}", "", 0))

        with with_host(cephadm_module, "test"):
            cephadm_module.set_store("test/grafana_crt", grafana_cert)
            cephadm_module.set_store("test/grafana_key", grafana_key)
            with with_service(
                cephadm_module, PrometheusSpec("prometheus")
            ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service(
                cephadm_module, GrafanaSpec("grafana")
            ) as _:
                files = {
                    'grafana.ini': dedent("""
                        # This file is generated by cephadm.
                          default_theme = light
                          org_name = 'Main Org.'
                          domain = 'bootstrap.storage.lab'
                          cert_file = /etc/grafana/certs/cert_file
                          cert_key = /etc/grafana/certs/cert_key
                          external_enabled = false
                          disable_initial_admin_creation = true
                          cookie_samesite = none
                          allow_embedding = true""").lstrip(), # noqa: W291
                    'provisioning/datasources/ceph-dashboard.yml': dedent("""
                        # This file is generated by cephadm.
                          url: 'http://[1::4]:9095'
                          editable: false""").lstrip(),
                    'certs/cert_file': dedent(f"""
                        # generated by cephadm\n{grafana_cert}""").lstrip(),
                    'certs/cert_key': dedent(f"""
                        # generated by cephadm\n{grafana_key}""").lstrip(),
                }

                _run_cephadm.assert_called_with(
                    '--name', 'grafana.test',
                    ('{"service_name": "grafana", "ports": [3000], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-', '--tcp-ports', '3000'],
                    stdin=json.dumps({"files": files}),

    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, GrafanaSpec(initial_admin_password='secure')):
                out = cephadm_module.cephadm_services['grafana'].generate_config(
                    CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))

                '# This file is generated by cephadm.\n'
                ' default_theme = light\n'
                " org_name = 'Main Org.'\n"
                " org_role = 'Viewer'\n"
                " domain = 'bootstrap.storage.lab'\n"
                ' protocol = https\n'
                ' cert_file = /etc/grafana/certs/cert_file\n'
                ' cert_key = /etc/grafana/certs/cert_key\n'
                ' http_port = 3000\n'
                ' external_enabled = false\n'
                ' admin_user = admin\n'
                ' admin_password = secure\n'
                ' cookie_secure = true\n'
                ' cookie_samesite = none\n'
                ' allow_embedding = true',
                'provisioning/datasources/ceph-dashboard.yml':
                    "# This file is generated by cephadm.\n"
                    'deleteDatasources:\n\n'
                    ' basicAuth: false\n'
                    ' isDefault: false\n'
                'certs/cert_file': ANY,
                'certs/cert_key': ANY}}, ['secure_monitoring_stack:False'])

    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_grafana_no_anon_access(self, cephadm_module: CephadmOrchestrator):
        # with anonymous_access set to False, expecting the [auth.anonymous] section
        # to not be present in the grafana config. Note that we require an initial_admin_password
        # to be provided when anonymous_access is False
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, GrafanaSpec(anonymous_access=False, initial_admin_password='secure')):
                out = cephadm_module.cephadm_services['grafana'].generate_config(
                    CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))

                '# This file is generated by cephadm.\n'
                ' default_theme = light\n'
                " domain = 'bootstrap.storage.lab'\n"
                ' protocol = https\n'
                ' cert_file = /etc/grafana/certs/cert_file\n'
                ' cert_key = /etc/grafana/certs/cert_key\n'
                ' http_port = 3000\n'
                ' external_enabled = false\n'
                ' admin_user = admin\n'
                ' admin_password = secure\n'
                ' cookie_secure = true\n'
                ' cookie_samesite = none\n'
                ' allow_embedding = true',
                'provisioning/datasources/ceph-dashboard.yml':
                    "# This file is generated by cephadm.\n"
                    'deleteDatasources:\n\n'
                    ' basicAuth: false\n'
                    ' isDefault: false\n'
                'certs/cert_file': ANY,
                'certs/cert_key': ANY}}, ['secure_monitoring_stack:False'])

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            yaml_str = """service_type: alertmanager
service_name: alertmanager

            yaml_file = yaml.safe_load(yaml_str)
            spec = ServiceSpec.from_json(yaml_file)

            with patch("cephadm.services.monitoring.AlertmanagerService.generate_config", return_value=({}, [])):
                with with_service(cephadm_module, spec):
                    CephadmServe(cephadm_module)._check_daemons()

                    _run_cephadm.assert_called_with(
                        'test', 'alertmanager.test', 'deploy', [
                            '--name', 'alertmanager.test',
                            '--meta-json', ('{"service_name": "alertmanager", "ports": [4200, 9094], "ip": null, "deployed_by": [], "rank": null, '
                                            '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                            '--config-json', '-',
                            '--tcp-ports', '4200 9094',
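

# RGW frontend configuration: parametrized over beast/civetweb, ssl on/off and
# extra frontend args, checking the rgw_frontends config value set for the daemon.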
class TestRGWService:

    @pytest.mark.parametrize(
        "frontend, ssl, extra_args, expected",
        [
            ('beast', False, ['tcp_nodelay=1'],
             'beast endpoint=[fd00:fd00:fd00:3000::1]:80 tcp_nodelay=1'),
            ('beast', True, ['tcp_nodelay=0', 'max_header_size=65536'],
             'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo tcp_nodelay=0 max_header_size=65536'),
            ('civetweb', False, [], 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
            ('civetweb', True, None,
             'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'),
        ]
    )
    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, frontend, ssl, extra_args, expected, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'host1'):
            cephadm_module.cache.update_host_networks('host1', {
                'fd00:fd00:fd00:3000::/64': {
                    'if0': ['fd00:fd00:fd00:3000::1']
                }
            })
            s = RGWSpec(service_id="foo",
                        networks=['fd00:fd00:fd00:3000::/64'],
                        rgw_frontend_type=frontend,
                        rgw_frontend_extra_args=extra_args)
            with with_service(cephadm_module, s) as dds:
                _, f, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': f'client.{dds[0]}',
                    'key': 'rgw_frontends',
                })
                assert f == expected
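

# Mon service: verify that crush locations from the spec are pushed to the mons
# via 'mon set_location' (recorded by the fake mgr above).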
class TestMonService:

    def test_set_crush_locations(self, cephadm_module: CephadmOrchestrator):
        mon_service = MonService(mgr)
        mon_spec = ServiceSpec(service_type='mon', crush_locations={'vm-00': ['datacenter=a', 'rack=1'], 'vm-01': ['datacenter=a'], 'vm-02': ['datacenter=b', 'rack=3']})

        mon_daemons = [
            DaemonDescription(daemon_type='mon', daemon_id='vm-00', hostname='vm-00'),
            DaemonDescription(daemon_type='mon', daemon_id='vm-01', hostname='vm-01'),
            DaemonDescription(daemon_type='mon', daemon_id='vm-02', hostname='vm-02')
        ]
        mon_service.set_crush_locations(mon_daemons, mon_spec)
        assert 'vm-00' in mgr.set_mon_crush_locations
        assert mgr.set_mon_crush_locations['vm-00'] == ['datacenter=a', 'rack=1']
        assert 'vm-01' in mgr.set_mon_crush_locations
        assert mgr.set_mon_crush_locations['vm-01'] == ['datacenter=a']
        assert 'vm-02' in mgr.set_mon_crush_locations
        assert mgr.set_mon_crush_locations['vm-02'] == ['datacenter=b', 'rack=3']
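

# SNMP gateway deployments: V2c (default and custom port) and V3 (auth only,
# auth+priv), checking the generated config json and the tcp ports used.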
class TestSNMPGateway:

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_destination='192.168.1.1:162',
            credentials={
                'snmp_community': 'public'
            })

        config = {
            "destination": spec.snmp_destination,
            "snmp_version": spec.snmp_version,
            "snmp_community": spec.credentials.get('snmp_community')
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'snmp-gateway.test',
                    '--name', 'snmp-gateway.test',
                    ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '9464'
                    stdin=json.dumps(config),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_destination='192.168.1.1:162',
            credentials={
                'snmp_community': 'public'
            })

        config = {
            "destination": spec.snmp_destination,
            "snmp_version": spec.snmp_version,
            "snmp_community": spec.credentials.get('snmp_community')
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'snmp-gateway.test',
                    '--name', 'snmp-gateway.test',
                    ('{"service_name": "snmp-gateway", "ports": [9465], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '9465'
                    stdin=json.dumps(config),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_destination='192.168.1.1:162',
            engine_id='8000C53F00000000',
            credentials={
                'snmp_v3_auth_username': 'myuser',
                'snmp_v3_auth_password': 'mypassword'
            })

        config = {
            'destination': spec.snmp_destination,
            'snmp_version': spec.snmp_version,
            'snmp_v3_auth_protocol': 'SHA',
            'snmp_v3_auth_username': 'myuser',
            'snmp_v3_auth_password': 'mypassword',
            'snmp_v3_engine_id': '8000C53F00000000'
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'snmp-gateway.test',
                    '--name', 'snmp-gateway.test',
                    ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '9464'
                    stdin=json.dumps(config),

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_destination='192.168.1.1:162',
            engine_id='8000C53F00000000',
            auth_protocol='MD5',
            privacy_protocol='AES',
            credentials={
                'snmp_v3_auth_username': 'myuser',
                'snmp_v3_auth_password': 'mypassword',
                'snmp_v3_priv_password': 'mysecret',
            })

        config = {
            'destination': spec.snmp_destination,
            'snmp_version': spec.snmp_version,
            'snmp_v3_auth_protocol': 'MD5',
            'snmp_v3_auth_username': spec.credentials.get('snmp_v3_auth_username'),
            'snmp_v3_auth_password': spec.credentials.get('snmp_v3_auth_password'),
            'snmp_v3_engine_id': '8000C53F00000000',
            'snmp_v3_priv_protocol': spec.privacy_protocol,
            'snmp_v3_priv_password': spec.credentials.get('snmp_v3_priv_password'),
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'snmp-gateway.test',
                    '--name', 'snmp-gateway.test',
                    ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '9464'
                    stdin=json.dumps(config),
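

# Ingress (haproxy + keepalived) config generation: NFS backends with ranked
# daemons, RGW backends (plain and SSL), multiple virtual IPs, and
# keepalive-only mode for NFS.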
class TestIngressService:

    @patch("cephadm.inventory.Inventory.get_addr")
    @patch("cephadm.utils.resolve_ip")
    @patch("cephadm.inventory.HostCache.get_daemons_by_service")
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_nfs_multiple_nfs_same_rank(self, _run_cephadm, _get_daemons_by_service, _resolve_ip, _get_addr, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        def fake_resolve_ip(hostname: str) -> str:
            if hostname == 'host1':
                return '192.168.122.111'
            elif hostname == 'host2':
                return '192.168.122.222'
            return 'xxx.xxx.xxx.xxx'
        _resolve_ip.side_effect = fake_resolve_ip

        def fake_get_addr(hostname: str) -> str:

        _get_addr.side_effect = fake_get_addr

        nfs_service = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1, hosts=['host1', 'host2']),

        ispec = IngressSpec(service_type='ingress',
                            service_id='nfs.foo',
                            backend_service='nfs.foo',
                            virtual_ip='192.168.122.100/24',
                            monitor_user='admin',
                            monitor_password='12345',
                            keepalived_password='12345')

        cephadm_module.spec_store._specs = {
            'nfs.foo': nfs_service,
            'ingress.nfs.foo': ispec
        }
        cephadm_module.spec_store.spec_created = {
            'nfs.foo': datetime_now(),
            'ingress.nfs.foo': datetime_now()
        }

        # in both test cases we'll do here, we want only the ip
        # for the host1 nfs daemon as we'll end up giving that
        # one higher rank_generation but the same rank as the one

        haproxy_expected_conf = {
            '# This file is generated by cephadm.\n'
            ' log 127.0.0.1 local2\n'
            ' chroot /var/lib/haproxy\n'
            ' pidfile /var/lib/haproxy/haproxy.pid\n'
            ' stats socket /var/lib/haproxy/stats\n\n'
            ' timeout queue 1m\n'
            ' timeout connect 10s\n'
            ' timeout client 1m\n'
            ' timeout server 1m\n'
            ' timeout check 10s\n'
            ' bind 192.168.122.100:9049\n'
            ' bind host1:9049\n'
            ' stats uri /stats\n'
            ' stats refresh 10s\n'
            ' stats auth admin:12345\n'
            ' http-request use-service prometheus-exporter if { path /metrics }\n'
            ' monitor-uri /health\n\n'
            'frontend frontend\n'
            ' bind 192.168.122.100:2049\n'
            ' default_backend backend\n\n'
            ' hash-type consistent\n'
            ' server nfs.foo.0 192.168.122.111:12049\n'

        # verify we get the same cfg regardless of the order in which the nfs daemons are returned
        # in this case both nfs are rank 0, so it should only take the one with rank_generation 1 a.k.a
        nfs_daemons = [
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049]),
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049])
        ]
        _get_daemons_by_service.return_value = nfs_daemons

        haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
            CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))

        assert haproxy_generated_conf[0] == haproxy_expected_conf

        # swapping order now, should still pick out the one with the higher rank_generation
        # in this case both nfs are rank 0, so it should only take the one with rank_generation 1 a.k.a
        nfs_daemons = [
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049]),
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049])
        ]
        _get_daemons_by_service.return_value = nfs_daemons

        haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
            CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))

        assert haproxy_generated_conf[0] == haproxy_expected_conf

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.7'):
            cephadm_module.cache.update_host_networks('test', {
                    'if0': ['1.2.3.4/32']

            # the ingress backend
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ispec = IngressSpec(service_type='ingress',
                                backend_service='rgw.foo',
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    '# This file is generated by cephadm.\n'
                    'vrrp_script check_backend {\n '
                    'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
                    'vrrp_instance VI_0 {\n '
                    'virtual_router_id 50\n '
                    'authentication {\n '
                    'auth_pass 12345\n '
                    'unicast_src_ip 1.2.3.7\n '
                    'virtual_ipaddress {\n '
                    '1.2.3.4/32 dev if0\n '
                    'check_backend\n }\n'

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                haproxy_expected_conf = {
                    '# This file is generated by cephadm.'
                    '127.0.0.1 local2\n '
                    'chroot /var/lib/haproxy\n '
                    'pidfile /var/lib/haproxy/haproxy.pid\n '
                    'stats socket /var/lib/haproxy/stats\n'
                    'option dontlognull\n '
                    'option http-server-close\n '
                    'option forwardfor except 127.0.0.0/8\n '
                    'option redispatch\n '
                    'timeout queue 20s\n '
                    'timeout connect 5s\n '
                    'timeout http-request 1s\n '
                    'timeout http-keep-alive 5s\n '
                    'timeout client 30s\n '
                    'timeout server 30s\n '
                    'timeout check 5s\n '
                    '\nfrontend stats\n '
                    'bind 1.2.3.4:8999\n '
                    'bind 1.2.3.7:8999\n '
                    'stats uri /stats\n '
                    'stats refresh 10s\n '
                    'stats auth admin:12345\n '
                    'http-request use-service prometheus-exporter if { path /metrics }\n '
                    'monitor-uri /health\n'
                    '\nfrontend frontend\n '
                    'bind 1.2.3.4:8089\n '
                    'default_backend backend\n\n'
                    'backend backend\n '
                    'option forwardfor\n '
                    'balance static-rr\n '
                    'option httpchk HEAD / HTTP/1.0\n '
                    + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n'

                assert haproxy_generated_conf[0] == haproxy_expected_conf

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_ssl_rgw(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            cephadm_module.cache.update_host_networks('test', {
                    'if0': ['1.2.3.4/32']

            # the ingress backend
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast', rgw_frontend_port=443, ssl=True)

            ispec = IngressSpec(service_type='ingress',
                                backend_service='rgw.foo',
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    '# This file is generated by cephadm.\n'
                    'vrrp_script check_backend {\n '
                    'script "/usr/bin/curl http://[1::4]:8999/health"\n '
                    'vrrp_instance VI_0 {\n '
                    'virtual_router_id 50\n '
                    'authentication {\n '
                    'auth_pass 12345\n '
                    'unicast_src_ip 1::4\n '
                    'virtual_ipaddress {\n '
                    '1.2.3.4/32 dev if0\n '
                    'check_backend\n }\n'

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                haproxy_expected_conf = {
                    '# This file is generated by cephadm.'
                    '127.0.0.1 local2\n '
                    'chroot /var/lib/haproxy\n '
                    'pidfile /var/lib/haproxy/haproxy.pid\n '
                    'stats socket /var/lib/haproxy/stats\n'
                    'option dontlognull\n '
                    'option http-server-close\n '
                    'option forwardfor except 127.0.0.0/8\n '
                    'option redispatch\n '
                    'timeout queue 20s\n '
                    'timeout connect 5s\n '
                    'timeout http-request 1s\n '
                    'timeout http-keep-alive 5s\n '
                    'timeout client 30s\n '
                    'timeout server 30s\n '
                    'timeout check 5s\n '
                    '\nfrontend stats\n '
                    'bind 1.2.3.4:8999\n '
                    'stats uri /stats\n '
                    'stats refresh 10s\n '
                    'stats auth admin:12345\n '
                    'http-request use-service prometheus-exporter if { path /metrics }\n '
                    'monitor-uri /health\n'
                    '\nfrontend frontend\n '
                    'bind 1.2.3.4:8089\n '
                    'default_backend backend\n\n'
                    'backend backend\n '
                    'option forwardfor\n '
                    'default-server ssl\n '
                    'default-server verify none\n '
                    'balance static-rr\n '
                    'option httpchk HEAD / HTTP/1.0\n '
                    + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100\n'

                assert haproxy_generated_conf[0] == haproxy_expected_conf

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.7'):
            cephadm_module.cache.update_host_networks('test', {
                    'if0': ['1.2.3.4/32']

            # Check the ingress with multiple VIPs
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ispec = IngressSpec(service_type='ingress',
                                backend_service='rgw.foo',
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ips_list=["1.2.3.4/32"])
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                # Test with only 1 IP on the list, as it will fail with more VIPS but only one host.
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    '# This file is generated by cephadm.\n'
                    'vrrp_script check_backend {\n '
                    'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
                    'vrrp_instance VI_0 {\n '
                    'virtual_router_id 50\n '
                    'authentication {\n '
                    'auth_pass 12345\n '
                    'unicast_src_ip 1.2.3.7\n '
                    'virtual_ipaddress {\n '
                    '1.2.3.4/32 dev if0\n '
                    'check_backend\n }\n'

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                haproxy_expected_conf = {
                    '# This file is generated by cephadm.'
                    '127.0.0.1 local2\n '
                    'chroot /var/lib/haproxy\n '
                    'pidfile /var/lib/haproxy/haproxy.pid\n '
                    'stats socket /var/lib/haproxy/stats\n'
                    'option dontlognull\n '
                    'option http-server-close\n '
                    'option forwardfor except 127.0.0.0/8\n '
                    'option redispatch\n '
                    'timeout queue 20s\n '
                    'timeout connect 5s\n '
                    'timeout http-request 1s\n '
                    'timeout http-keep-alive 5s\n '
                    'timeout client 30s\n '
                    'timeout server 30s\n '
                    'timeout check 5s\n '
                    '\nfrontend stats\n '
                    'bind 1.2.3.7:8999\n '
                    'stats uri /stats\n '
                    'stats refresh 10s\n '
                    'stats auth admin:12345\n '
                    'http-request use-service prometheus-exporter if { path /metrics }\n '
                    'monitor-uri /health\n'
                    '\nfrontend frontend\n '
                    'default_backend backend\n\n'
                    'backend backend\n '
                    'option forwardfor\n '
                    'balance static-rr\n '
                    'option httpchk HEAD / HTTP/1.0\n '
                    + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n'

                assert haproxy_generated_conf[0] == haproxy_expected_conf

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
    @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
    @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
    @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
    def test_keepalive_only_nfs_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.7'):
            cephadm_module.cache.update_host_networks('test', {
                    'if0': ['1.2.3.4/32']

            # Check the ingress with multiple VIPs
            s = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1),
                               virtual_ip='1.2.3.0/24')

            ispec = IngressSpec(service_type='ingress',
                                backend_service='nfs.foo',
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_ip='1.2.3.0/24',
                                keepalive_only=True)
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                nfs_generated_conf, _ = cephadm_module.cephadm_services['nfs'].generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=s.service_name()))
                ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
                assert "Bind_addr = 1.2.3.0/24" in ganesha_conf

                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    '# This file is generated by cephadm.\n'
                    'vrrp_script check_backend {\n '
                    'script "/usr/bin/false"\n '
                    'vrrp_instance VI_0 {\n '
                    'virtual_router_id 50\n '
                    'authentication {\n '
                    'auth_pass 12345\n '
                    'unicast_src_ip 1.2.3.7\n '
                    'virtual_ipaddress {\n '
                    '1.2.3.0/24 dev if0\n '
                    'check_backend\n }\n'

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf
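

# cephfs-mirror: deploying the service should enable the 'mirroring' mgr module.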
class TestCephFsMirror:
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('cephfs-mirror')):
                cephadm_module.assert_issued_mon_command({
                    'prefix': 'mgr module enable',
                    'module': 'mirroring'
                })
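

# Jaeger tracing services (query, elasticsearch, collector, agent): check the
# elasticsearch/collector node settings passed via the config json and the
# tcp ports used by each daemon.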
@patch("cephadm.serve.CephadmServe._run_cephadm")
def test_jaeger_query(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

    spec = TracingSpec(es_nodes="192.168.0.1:9200",
                       service_type="jaeger-query")

    config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}

    with with_host(cephadm_module, 'test'):
        with with_service(cephadm_module, spec):
            _run_cephadm.assert_called_with(
                'jaeger-query.test',
                '--name', 'jaeger-query.test',
                ('{"service_name": "jaeger-query", "ports": [16686], "ip": null, "deployed_by": [], "rank": null, '
                 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                '--config-json', '-',
                '--tcp-ports', '16686'
                stdin=json.dumps(config),

@patch("cephadm.serve.CephadmServe._run_cephadm")
def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

    collector_spec = TracingSpec(service_type="jaeger-collector")
    es_spec = TracingSpec(service_type="elasticsearch")

    with with_host(cephadm_module, 'test'):
        collector_config = {
            "elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'}
        with with_service(cephadm_module, es_spec):
            _run_cephadm.assert_called_with(
                'elasticsearch.test',
                '--name', 'elasticsearch.test',
                ('{"service_name": "elasticsearch", "ports": [9200], "ip": null, "deployed_by": [], "rank": null, '
                 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                '--config-json', '-',
                '--tcp-ports', '9200'
                stdin=json.dumps(es_config),
            with with_service(cephadm_module, collector_spec):
                _run_cephadm.assert_called_with(
                    'jaeger-collector.test',
                    '--name', 'jaeger-collector.test',
                    ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, '
                     '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                    '--config-json', '-',
                    '--tcp-ports', '14250'
                    stdin=json.dumps(collector_config),

@patch("cephadm.serve.CephadmServe._run_cephadm")
def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

    collector_spec = TracingSpec(service_type="jaeger-collector", es_nodes="192.168.0.1:9200")
    collector_config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}

    agent_spec = TracingSpec(service_type="jaeger-agent")
    agent_config = {"collector_nodes": "test:14250"}

    with with_host(cephadm_module, 'test'):
        with with_service(cephadm_module, collector_spec):
            _run_cephadm.assert_called_with(
                'jaeger-collector.test',
                '--name', 'jaeger-collector.test',
                ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, '
                 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                '--config-json', '-',
                '--tcp-ports', '14250'
                stdin=json.dumps(collector_config),
        with with_service(cephadm_module, agent_spec):
            _run_cephadm.assert_called_with(
                'jaeger-agent.test',
                '--name', 'jaeger-agent.test',
                ('{"service_name": "jaeger-agent", "ports": [6799], "ip": null, "deployed_by": [], "rank": null, '
                 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
                '--config-json', '-',
                '--tcp-ports', '6799'
                stdin=json.dumps(agent_config),