from textwrap import dedent
# stdlib imports restored (elided in the excerpt); the code below uses them
import json
import urllib.parse

import pytest
import yaml

from mgr_util import build_url
from unittest.mock import MagicMock, call, patch, ANY
from cephadm.serve import CephadmServe
from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RgwService, \
    RbdMirrorService, CrashService, CephadmDaemonDeploySpec
from cephadm.services.iscsi import IscsiService
from cephadm.services.nfs import NFSService
from cephadm.services.nvmeof import NvmeofService
from cephadm.services.osd import OSDService
from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
    NodeExporterService, LokiService, PromtailService
from cephadm.services.smb import SMBSpec
from cephadm.module import CephadmOrchestrator
from ceph.deployment.service_spec import (
    # the import list was elided in the excerpt; these are the names the
    # tests below actually use from ceph.deployment.service_spec
    AlertManagerSpec,
    CephExporterSpec,
    GrafanaSpec,
    IngressSpec,
    IscsiServiceSpec,
    MonitoringSpec,
    NFSServiceSpec,
    NvmeofServiceSpec,
    PlacementSpec,
    PrometheusSpec,
    RGWSpec,
    ServiceSpec,
    SNMPGatewaySpec,
)
from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect
from ceph.utils import datetime_now
from orchestrator import OrchestratorError
from orchestrator._interface import DaemonDescription
from typing import Dict, List
grafana_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQDIZSujNBlKaLJzmvntjukjMA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzEzMTE0NzA3WhcN\nMzIwNzEwMTE0NzA3WjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyMe4DMA+MeYK7BHZMHB\nq7zjliEOcNgxomjU8qbf5USF7Mqrf6+/87XWqj4pCyAW8x0WXEr6A56a+cmBVmt+\nqtWDzl020aoId6lL5EgLLn6/kMDCCJLq++Lg9cEofMSvcZh+lY2f+1p+C+00xent\nrLXvXGOilAZWaQfojT2BpRnNWWIFbpFwlcKrlg2G0cFjV5c1m6a0wpsQ9JHOieq0\nSvwCixajwq3CwAYuuiU1wjI4oJO4Io1+g8yB3nH2Mo/25SApCxMXuXh4kHLQr/T4\n4hqisvG4uJYgKMcSIrWj5o25mclByGi1UI/kZkCUES94i7Z/3ihx4Bad0AMs/9tw\nFwIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQAf+pwz7Gd7mDwU2LY0TQXsK6/8KGzh\nHuX+ErOb8h5cOAbvCnHjyJFWf6gCITG98k9nxU9NToG0WYuNm/max1y/54f0dtxZ\npUo6KSNl3w6iYCfGOeUIj8isi06xMmeTgMNzv8DYhDt+P2igN6LenqWTVztogkiV\nxQ5ZJFFLEw4sN0CXnrZX3t5ruakxLXLTLKeE0I91YJvjClSBGkVJq26wOKQNHMhx\npWxeydQ5EgPZY+Aviz5Dnxe8aB7oSSovpXByzxURSabOuCK21awW5WJCGNpmqhWK\nZzACBDEstccj57c4OGV0eayHJRsluVr2e9NHRINZA3qdB37e6gsI1xHo\n-----END CERTIFICATE-----\n"""

grafana_key = """-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLIx7gMwD4x5gr\nsEdkwcGrvOOWIQ5w2DGiaNTypt/lRIXsyqt/r7/ztdaqPikLIBbzHRZcSvoDnpr5\nyYFWa36q1YPOXTbRqgh3qUvkSAsufr+QwMIIkur74uD1wSh8xK9xmH6VjZ/7Wn4L\n7TTF6e2ste9cY6KUBlZpB+iNPYGlGc1ZYgVukXCVwquWDYbRwWNXlzWbprTCmxD0\nkc6J6rRK/AKLFqPCrcLABi66JTXCMjigk7gijX6DzIHecfYyj/blICkLExe5eHiQ\nctCv9PjiGqKy8bi4liAoxxIitaPmjbmZyUHIaLVQj+RmQJQRL3iLtn/eKHHgFp3Q\nAyz/23AXAgMBAAECggEAVoTB3Mm8azlPlaQB9GcV3tiXslSn+uYJ1duCf0sV52dV\nBzKW8s5fGiTjpiTNhGCJhchowqxoaew+o47wmGc2TvqbpeRLuecKrjScD0GkCYyQ\neM2wlshEbz4FhIZdgS6gbuh9WaM1dW/oaZoBNR5aTYo7xYTmNNeyLA/jO2zr7+4W\n5yES1lMSBXpKk7bDGKYY4bsX2b5RLr2Grh2u2bp7hoLABCEvuu8tSQdWXLEXWpXo\njwmV3hc6tabypIa0mj2Dmn2Dmt1ppSO0AZWG/WAizN3f4Z0r/u9HnbVrVmh0IEDw\n3uf2LP5o3msG9qKCbzv3lMgt9mMr70HOKnJ8ohMSKQKBgQDLkNb+0nr152HU9AeJ\nvdz8BeMxcwxCG77iwZphZ1HprmYKvvXgedqWtS6FRU+nV6UuQoPUbQxJBQzrN1Qv\nwKSlOAPCrTJgNgF/RbfxZTrIgCPuK2KM8I89VZv92TSGi362oQA4MazXC8RAWjoJ\nSu1/PHzK3aXOfVNSLrOWvIYeZQKBgQD/dgT6RUXKg0UhmXj7ExevV+c7oOJTDlMl\nvLngrmbjRgPO9VxLnZQGdyaBJeRngU/UXfNgajT/MU8B5fSKInnTMawv/tW7634B\nw3v6n5kNIMIjJmENRsXBVMllDTkT9S7ApV+VoGnXRccbTiDapBThSGd0wri/CuwK\nNWK1YFOeywKBgEDyI/XG114PBUJ43NLQVWm+wx5qszWAPqV/2S5MVXD1qC6zgCSv\nG9NLWN1CIMimCNg6dm7Wn73IM7fzvhNCJgVkWqbItTLG6DFf3/DPODLx1wTMqLOI\nqFqMLqmNm9l1Nec0dKp5BsjRQzq4zp1aX21hsfrTPmwjxeqJZdioqy2VAoGAXR5X\nCCdSHlSlUW8RE2xNOOQw7KJjfWT+WAYoN0c7R+MQplL31rRU7dpm1bLLRBN11vJ8\nMYvlT5RYuVdqQSP6BkrX+hLJNBvOLbRlL+EXOBrVyVxHCkDe+u7+DnC4epbn+N8P\nLYpwqkDMKB7diPVAizIKTBxinXjMu5fkKDs5n+sCgYBbZheYKk5M0sIxiDfZuXGB\nkf4mJdEkTI1KUGRdCwO/O7hXbroGoUVJTwqBLi1tKqLLarwCITje2T200BYOzj82\nqwRkCXGtXPKnxYEEUOiFx9OeDrzsZV00cxsEnX0Zdj+PucQ/J3Cvd0dWUspJfLHJ\n39gnaegswnz9KMQAvzKFdg==\n-----END PRIVATE KEY-----\n"""
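# Self-signed certificate/key pair used as fixtures by the Grafana config
# tests below (stored via cert_key_store and echoed back in generated files).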
# Test doubles: FakeInventory and FakeMgr emulate just enough of the mgr
# module for the service classes under test (the class headers and default
# attribute values around the surviving lines are assumed reconstructions).
class FakeInventory:
    def get_addr(self, name: str) -> str:
        return '1.2.3.4'


class FakeMgr:
    def __init__(self):
        self.config = ''
        self.set_mon_crush_locations: Dict[str, List[str]] = {}
        self.check_mon_command = MagicMock(side_effect=self._check_mon_command)
        self.mon_command = MagicMock(side_effect=self._check_mon_command)
        self.template = MagicMock()
        self.log = MagicMock()
        self.inventory = FakeInventory()
    def _check_mon_command(self, cmd_dict, inbuf=None):
        prefix = cmd_dict.get('prefix')
        if prefix == 'get-cmd':
            return 0, self.config, ''
        if prefix == 'set-cmd':
            self.config = cmd_dict.get('value')
            return 0, 'value set', ''
        if prefix in ['auth get']:
            return 0, '[foo]\nkeyring = asdf\n', ''
        if prefix == 'quorum_status':
            # actual quorum status output from testing
            # note in this output all of the mons have blank crush locations
            return 0, """{"election_epoch": 14, "quorum": [0, 1, 2], "quorum_names": ["vm-00", "vm-01", "vm-02"], "quorum_leader_name": "vm-00", "quorum_age": 101, "features": {"quorum_con": "4540138322906710015", "quorum_mon": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"]}, "monmap": {"epoch": 3, "fsid": "9863e1b8-6f24-11ed-8ad8-525400c13ad2", "modified": "2022-11-28T14:00:29.972488Z", "created": "2022-11-28T13:57:55.847497Z", "min_mon_release": 18, "min_mon_release_name": "reef", "election_strategy": 1, "disallowed_leaders: ": "", "stretch_mode": false, "tiebreaker_mon": "", "features": {"persistent": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"], "optional": []}, "mons": [{"rank": 0, "name": "vm-00", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.61:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.61:6789", "nonce": 0}]}, "addr": "192.168.122.61:6789/0", "public_addr": "192.168.122.61:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 1, "name": "vm-01", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.63:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.63:6789", "nonce": 0}]}, "addr": "192.168.122.63:6789/0", "public_addr": "192.168.122.63:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 2, "name": "vm-02", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.82:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.82:6789", "nonce": 0}]}, "addr": "192.168.122.82:6789/0", "public_addr": "192.168.122.82:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}]}}""", ''
        if prefix == 'mon set_location':
            self.set_mon_crush_locations[cmd_dict.get('name')] = cmd_dict.get('args')
            return 0, '', ''  # success return assumed; the original line is elided
        return -1, '', 'error'
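    # _check_mon_command mirrors the (retcode, stdout, stderr) contract of the
    # real check_mon_command(); any prefix not handled above is reported as a
    # failure so unexpected commands surface in the tests.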
    def get_minimal_ceph_conf(self) -> str:
        return ''  # stub body assumed; elided in the excerpt

    def get_mgr_ip(self) -> str:
        return '1.2.3.4'  # stub body assumed; elided in the excerpt
class TestCephadmService:
    def test_set_service_url_on_dashboard(self):
        # pylint: disable=protected-access
        mgr = FakeMgr()
        service_url = 'http://svc:1000'
        service = GrafanaService(mgr)
        service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
        assert mgr.config == service_url

        # set-cmd should not be called if value doesn't change
        mgr.check_mon_command.reset_mock()
        service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
        mgr.check_mon_command.assert_called_once_with({'prefix': 'get-cmd'})
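        # _set_service_url_on_dashboard is expected to read the current value
        # ('get-cmd') first and only issue 'set-cmd' when the value changes,
        # which is why exactly one call is recorded after the reset.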
    def _get_services(self, mgr):
        osd_service = OSDService(mgr)
        nfs_service = NFSService(mgr)
        mon_service = MonService(mgr)
        mgr_service = MgrService(mgr)
        mds_service = MdsService(mgr)
        rgw_service = RgwService(mgr)
        rbd_mirror_service = RbdMirrorService(mgr)
        grafana_service = GrafanaService(mgr)
        alertmanager_service = AlertmanagerService(mgr)
        prometheus_service = PrometheusService(mgr)
        node_exporter_service = NodeExporterService(mgr)
        loki_service = LokiService(mgr)
        promtail_service = PromtailService(mgr)
        crash_service = CrashService(mgr)
        iscsi_service = IscsiService(mgr)
        nvmeof_service = NvmeofService(mgr)
        cephadm_services = {
            # the entries for mon/mgr/osd/mds/rgw/nfs were elided in the
            # excerpt; they are restored here because the tests below use them
            'mon': mon_service,
            'mgr': mgr_service,
            'osd': osd_service,
            'mds': mds_service,
            'rgw': rgw_service,
            'rbd-mirror': rbd_mirror_service,
            'nfs': nfs_service,
            'grafana': grafana_service,
            'alertmanager': alertmanager_service,
            'prometheus': prometheus_service,
            'node-exporter': node_exporter_service,
            'loki': loki_service,
            'promtail': promtail_service,
            'crash': crash_service,
            'iscsi': iscsi_service,
            'nvmeof': nvmeof_service,
        }
        return cephadm_services
    def test_get_auth_entity(self):
        mgr = FakeMgr()
        cephadm_services = self._get_services(mgr)

        for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1")

        assert "client.crash.host" == \
            cephadm_services["crash"].get_auth_entity("id1", "host")
        with pytest.raises(OrchestratorError):
            cephadm_services["crash"].get_auth_entity("id1", "")
            cephadm_services["crash"].get_auth_entity("id1")

        assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
        assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
        assert "mon." == cephadm_services["mon"].get_auth_entity("id1")

        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "host")
        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "")
        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1")

        for daemon_type in ["osd", "mds"]:
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1")

        # services based on CephadmService shouldn't have get_auth_entity
        with pytest.raises(AttributeError):
            for daemon_type in ['grafana', 'alertmanager', 'prometheus', 'node-exporter', 'loki', 'promtail']:
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
                cephadm_services[daemon_type].get_auth_entity("id1", "")
                cephadm_services[daemon_type].get_auth_entity("id1")
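        # Note: a `with pytest.raises(...)` block only asserts that some call
        # inside it raises; execution stops at the first AttributeError, so in
        # practice the loop above exercises the first service in the list.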
class TestISCSIService:

    mgr = FakeMgr()
    iscsi_service = IscsiService(mgr)

    iscsi_spec = IscsiServiceSpec(service_type='iscsi', service_id="a")
    iscsi_spec.daemon_type = "iscsi"
    iscsi_spec.daemon_id = "a"
    iscsi_spec.spec = MagicMock()
    iscsi_spec.spec.daemon_type = "iscsi"
    iscsi_spec.spec.ssl_cert = ''
    iscsi_spec.api_user = "user"
    iscsi_spec.api_password = "password"
    iscsi_spec.api_port = 5000
    iscsi_spec.api_secure = False
    iscsi_spec.ssl_cert = "cert"
    iscsi_spec.ssl_key = "key"

    mgr.spec_store = MagicMock()
    mgr.spec_store.all_specs.get.return_value = iscsi_spec
    def test_iscsi_client_caps(self):
        iscsi_daemon_spec = CephadmDaemonDeploySpec(
            host='host', daemon_id='a', service_name=self.iscsi_spec.service_name())

        self.iscsi_service.prepare_create(iscsi_daemon_spec)

        expected_caps = ['mon',
                         'profile rbd, allow command "osd blocklist", allow command "config-key get" with "key" prefix "iscsi/"',
                         'mgr', 'allow command "service status"',
                         'osd', 'allow rwx']  # trailing osd cap assumed; line elided in the excerpt

        expected_call = call({'prefix': 'auth get-or-create',
                              'entity': 'client.iscsi.a',
                              'caps': expected_caps})
        expected_call2 = call({'prefix': 'auth caps',
                               'entity': 'client.iscsi.a',
                               'caps': expected_caps})
        expected_call3 = call({'prefix': 'auth get',
                               'entity': 'client.iscsi.a'})

        assert expected_call in self.mgr.mon_command.mock_calls
        assert expected_call2 in self.mgr.mon_command.mock_calls
        assert expected_call3 in self.mgr.mon_command.mock_calls
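        # prepare_create() provisions the gateway's cephx identity: create the
        # entity ('auth get-or-create'), reconcile its caps ('auth caps'), and
        # fetch the keyring ('auth get') for inclusion in the daemon config.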
    @patch('cephadm.utils.resolve_ip')
    def test_iscsi_dashboard_config(self, mock_resolve_ip):
        self.mgr.check_mon_command = MagicMock()
        self.mgr.check_mon_command.return_value = ('', '{"gateways": {}}', '')

        # Case 1: use IPV4 address
        id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
                                daemon_id="a", ip='192.168.1.1')
        daemon_list = [id1]  # elided in the excerpt; config_dashboard() needs it
        mock_resolve_ip.return_value = '192.168.1.1'

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'http://user:password@192.168.1.1:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls

        # Case 2: use IPV6 address
        self.mgr.check_mon_command.reset_mock()

        id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
                                daemon_id="a", ip='FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')
        daemon_list = [id1]
        mock_resolve_ip.return_value = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'http://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls

        # Case 3: IPv6 address, secure protocol
        self.mgr.check_mon_command.reset_mock()

        self.iscsi_spec.api_secure = True

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'https://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
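        # Note how the expected gateway URL switches scheme with api_secure
        # and brackets the literal IPv6 address ([addr]:port), as URLs require.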
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
    @patch("cephadm.services.iscsi.IscsiService.get_trusted_ips")
    def test_iscsi_config(self, _get_trusted_ips, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):

        iscsi_daemon_id = 'testpool.test.qwert'
        trusted_ips = '1.1.1.1,2.2.2.2'
        api_port = 3456  # assumed: the api_port definition was elided
        pool = 'testpool'  # assumed: the pool definition was elided
        api_user = 'test-user'
        api_password = 'test-password'

        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        _get_name.return_value = iscsi_daemon_id
        _get_trusted_ips.return_value = trusted_ips

        # expected iscsi-gateway.cfg; lines elided from the excerpt are omitted
        iscsi_gateway_conf = f"""# This file is generated by cephadm.
cluster_client_name = client.iscsi.{iscsi_daemon_id}
trusted_ip_list = {trusted_ips}
api_port = {api_port}
api_user = {api_user}
api_password = {api_password}
log_to_stderr_prefix = debug
log_to_file = False"""

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, IscsiServiceSpec(service_id=pool,
                                                               api_password=api_password,
                                                               trusted_ip_list=trusted_ips)):
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    f'iscsi.{iscsi_daemon_id}',
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": f'iscsi.{iscsi_daemon_id}',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [api_port],
                        },
                        "meta": {
                            'service_name': f'iscsi.{pool}',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n",
                            "files": {
                                "iscsi-gateway.cfg": iscsi_gateway_conf,
                            },
                        },
                    }),
                    use_current_daemon_image=False,
                )
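    # End-to-end config generation: with the pool name as service_id, a deploy
    # should hand cephadm an iscsi-gateway.cfg whose credentials, port, and
    # trusted IP list come straight from the spec and the mocked helpers.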
class TestNVMEOFService:

    mgr = FakeMgr()
    nvmeof_service = NvmeofService(mgr)

    nvmeof_spec = NvmeofServiceSpec(service_type='nvmeof', service_id="a")
    nvmeof_spec.daemon_type = 'nvmeof'
    nvmeof_spec.daemon_id = "a"
    nvmeof_spec.spec = MagicMock()
    nvmeof_spec.spec.daemon_type = 'nvmeof'

    mgr.spec_store = MagicMock()
    mgr.spec_store.all_specs.get.return_value = nvmeof_spec

    def test_nvmeof_client_caps(self):
        pass  # body elided in the excerpt

    @patch('cephadm.utils.resolve_ip')
    def test_nvmeof_dashboard_config(self, mock_resolve_ip):
        pass  # body elided in the excerpt
    @patch("cephadm.inventory.Inventory.get_addr", lambda _, __: '192.168.100.100')
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
    def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):

        nvmeof_daemon_id = 'testpool.test.qwert'
        pool = 'testpool'    # assumed: definition elided in the excerpt
        default_port = 5500  # assumed: matches the first expected tcp port
        tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288'

        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        _get_name.return_value = nvmeof_daemon_id

        # expected ceph-nvmeof.conf; section headers and some lines were elided
        # from the excerpt and are omitted here
        nvmeof_gateway_conf = f"""# This file is generated by cephadm.
name = client.nvmeof.{nvmeof_daemon_id}
addr = 192.168.100.100
port = {default_port}
state_update_notify = True
state_update_interval_sec = 5
enable_spdk_discovery_controller = False
enable_prometheus_exporter = True
prometheus_exporter_ssl = False
prometheus_port = 10008
omap_file_lock_duration = 20
omap_file_lock_retries = 30
omap_file_lock_retry_sleep_interval = 1.0
omap_file_update_reloads = 10
allowed_consecutive_spdk_ping_failures = 1
spdk_ping_interval_in_seconds = 2.0
ping_spdk_under_lock = False
enable_monitor_client = False
log_files_enabled = True
log_files_rotation_enabled = True
verbose_log_messages = True
max_log_file_size_in_mb = 10
max_log_files_count = 20
max_log_directory_backups = 10
log_directory = /var/log/ceph/
addr = 192.168.100.100
config_file = /etc/ceph/ceph.conf
id = nvmeof.{nvmeof_daemon_id}
server_key = /server.key
client_key = /client.key
server_cert = /server.cert
client_cert = /client.cert
root_ca_cert = /root.ca.cert
tgt_path = /usr/local/bin/nvmf_tgt
rpc_socket_dir = /var/tmp/
rpc_socket_name = spdk.sock
bdevs_per_cluster = 32
transport_tcp_options = {{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7}}
tgt_cmd_extra_args = {tgt_cmd_extra_args}"""

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, NvmeofServiceSpec(service_id=pool,
                                                                tgt_cmd_extra_args=tgt_cmd_extra_args,
                                                                pool=pool)):  # pool kwarg assumed; line elided
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    f'nvmeof.{nvmeof_daemon_id}',
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": "nvmeof.testpool.test.qwert",
                        "deploy_arguments": [],
                        "params": {
                            "tcp_ports": [5500, 4420, 8009]
                        },
                        "meta": {
                            "service_name": "nvmeof.testpool",
                            "ports": [5500, 4420, 8009],
                            "rank_generation": None,
                            "extra_container_args": None,
                            "extra_entrypoint_args": None
                        },
                        "config_blobs": {
                            "keyring": "[client.nvmeof.testpool.test.qwert]\nkey = None\n",
                            "files": {
                                "ceph-nvmeof.conf": nvmeof_gateway_conf
                            }
                        }
                    }),
                    use_current_daemon_image=False,
                )
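    # The expected tcp_ports appear to cover the gateway control port (5500),
    # the NVMe/TCP data port (4420), and the discovery port (8009).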
class TestMonitoring:
    def _get_config(self, url: str) -> str:
        # expected alertmanager.yml; lines elided from the excerpt are omitted
        # and the YAML nesting of the surviving lines is approximate
        return f"""
        # This file is generated by cephadm.
        # See https://prometheus.io/docs/alerting/configuration/ for documentation.
              insecure_skip_verify: true
            - group_by: ['alertname']
          receiver: 'ceph-dashboard'
        - name: 'ceph-dashboard'
          - url: '{url}/api/prometheus_receiver'
        """

    @pytest.mark.parametrize(
        "dashboard_url,expected_yaml_url",
        [
            ("http://[::1]:8080", "http://localhost:8080"),
            (
                "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
                "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
            ),
            (
                "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
                "http://mgr.fqdn.test:8080",
            ),
            (
                "http://192.168.0.123:8080",
                "http://192.168.0.123:8080",
            ),
            (
                "http://192.168.0.123:8080",
                "http://mgr.fqdn.test:8080",
            ),
        ],
    )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    @patch("socket.getfqdn")
    def test_alertmanager_config(
        # parameter list reconstructed from the decorators and parametrize above
        self,
        mock_getfqdn,
        mock_get,
        _run_cephadm,
        cephadm_module: CephadmOrchestrator,
        dashboard_url: str,
        expected_yaml_url: str,
    ):
        _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
        mock_get.return_value = {"services": {"dashboard": dashboard_url}}
        purl = urllib.parse.urlparse(expected_yaml_url)
        mock_getfqdn.return_value = purl.hostname

        with with_host(cephadm_module, "test"):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config(expected_yaml_url)).lstrip()
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "alertmanager.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'alertmanager.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9093, 9094],
                        },
                        "meta": {
                            'service_name': 'alertmanager',
                            'ports': [9093, 9094],
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": {
                                "alertmanager.yml": y,
                            },
                        },
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("socket.getfqdn")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
    @patch("cephadm.services.monitoring.password_hash", lambda password: 'alertmanager_password_hash')
    def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        fqdn = 'host_fqdn'  # assumed value; the original definition is elided
        _get_fqdn.return_value = fqdn

        def gen_cert(host, addr):
            return ('mycert', 'mykey')

        def get_root_cert():
            return 'my_root_cert'

        with with_host(cephadm_module, 'test'):
            cephadm_module.secure_monitoring_stack = True
            cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user')
            cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password')
            cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert)
            cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert)
            with with_service(cephadm_module, AlertManagerSpec()):
                # expected alertmanager.yml; elided lines omitted
                y = dedent(f"""
                # This file is generated by cephadm.
                # See https://prometheus.io/docs/alerting/configuration/ for documentation.
                      ca_file: root_cert.pem
                    - group_by: ['alertname']
                  receiver: 'ceph-dashboard'
                - name: 'ceph-dashboard'
                  - url: 'http://{fqdn}:8080/api/prometheus_receiver'
                """).lstrip()

                web_config = dedent("""
                tls_server_config:
                  cert_file: alertmanager.crt
                  key_file: alertmanager.key
                basic_auth_users:
                  alertmanager_user: alertmanager_password_hash""").lstrip()

                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "alertmanager.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'alertmanager.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9093, 9094],
                        },
                        "meta": {
                            'service_name': 'alertmanager',
                            'ports': [9093, 9094],
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": {
                                "alertmanager.yml": y,
                                'alertmanager.crt': 'mycert',
                                'alertmanager.key': 'mykey',
                                'web.yml': web_config,
                                'root_cert.pem': 'my_root_cert'
                            },
                            'web_config': '/etc/alertmanager/web.yml',
                        },
                    }),
                    use_current_daemon_image=False,
                )

            assert cephadm_module.cert_key_store.get_cert('alertmanager_cert', host='test') == 'mycert'
            assert cephadm_module.cert_key_store.get_key('alertmanager_key', host='test') == 'mykey'
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
    def test_prometheus_config_security_disabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
        with with_host(cephadm_module, 'test'):
            # host "test" needs to have networks for keepalive to be placed
            cephadm_module.cache.update_host_networks('test', {
                # network contents assumed from the expected ip_to_bind_to below
                '1.2.3.0/24': {
                    'if0': ['1.2.3.1']
                }
            })
            with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
                    with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \
                    with_service(cephadm_module, s) as _, \
                    with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
                    with_service(cephadm_module, IngressSpec(service_id='ingress',
                                                             monitor_user='admin',
                                                             monitor_password='12345',
                                                             keepalived_password='12345',
                                                             virtual_ip="1.2.3.4/32",
                                                             backend_service='rgw.foo')) as _, \
                    with_service(cephadm_module, PrometheusSpec('prometheus',
                                                                networks=['1.2.3.0/24'],
                                                                only_bind_port_on_networks=True)) as _:
                # expected prometheus.yml; lines elided from the excerpt are omitted
                y = dedent("""
                # This file is generated by cephadm.
                  evaluation_interval: 10s
                    - /etc/prometheus/alerting/*
                      - url: http://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
                    - source_labels: [__address__]
                      target_label: cluster
                    - source_labels: [instance]
                      target_label: instance
                      replacement: 'ceph_cluster'
                      - url: http://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
                      - url: http://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
                    - source_labels: [__address__]
                      target_label: cluster
                  - job_name: 'haproxy'
                      - url: http://[::1]:8765/sd/prometheus/sd-config?service=haproxy
                    - source_labels: [__address__]
                      target_label: cluster
                  - job_name: 'ceph-exporter'
                    - source_labels: [__address__]
                      target_label: cluster
                      - url: http://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
                      - url: http://[::1]:8765/sd/prometheus/sd-config?service=nvmeof
                  - job_name: 'federate'
                    metrics_path: '/federate'
                      - '{job="ceph-exporter"}'
                """).lstrip()

                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "prometheus.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'prometheus.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [8765],
                            'port_ips': {'8765': '1.2.3.1'}
                        },
                        "meta": {
                            'service_name': 'prometheus',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": {
                                'prometheus.yml': y,
                                "/etc/prometheus/alerting/custom_alerts.yml": "",
                            },
                            'retention_time': '15d',
                            'retention_size': '0',
                            'ip_to_bind_to': '1.2.3.1',
                        },
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
    @patch("cephadm.services.monitoring.password_hash", lambda password: 'prometheus_password_hash')
    def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')

        def gen_cert(host, addr):
            return ('mycert', 'mykey')

        with with_host(cephadm_module, 'test'):
            cephadm_module.secure_monitoring_stack = True
            cephadm_module.set_store(PrometheusService.USER_CFG_KEY, 'prometheus_user')
            cephadm_module.set_store(PrometheusService.PASS_CFG_KEY, 'prometheus_plain_password')
            cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user')
            cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password')
            cephadm_module.http_server.service_discovery.username = 'sd_user'
            cephadm_module.http_server.service_discovery.password = 'sd_password'
            cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(
                side_effect=gen_cert)
            # host "test" needs to have networks for keepalive to be placed
            cephadm_module.cache.update_host_networks('test', {
                # network contents assumed, matching the security-disabled test
                '1.2.3.0/24': {
                    'if0': ['1.2.3.1']
                }
            })
            with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
                    with_service(cephadm_module, s) as _, \
                    with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
                    with_service(cephadm_module, IngressSpec(service_id='ingress',
                                                             monitor_user='admin',
                                                             monitor_password='12345',
                                                             keepalived_password='12345',
                                                             virtual_ip="1.2.3.4/32",
                                                             backend_service='rgw.foo')) as _, \
                    with_service(cephadm_module, PrometheusSpec('prometheus')) as _:

                web_config = dedent("""
                tls_server_config:
                  cert_file: prometheus.crt
                  key_file: prometheus.key
                basic_auth_users:
                  prometheus_user: prometheus_password_hash""").lstrip()

                # expected prometheus.yml; lines elided from the excerpt are omitted
                y = dedent("""
                # This file is generated by cephadm.
                  evaluation_interval: 10s
                    - /etc/prometheus/alerting/*
                      username: alertmanager_user
                      password: alertmanager_plain_password
                      ca_file: root_cert.pem
                      - url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
                        password: sd_password
                        ca_file: root_cert.pem
                      ca_file: mgr_prometheus_cert.pem
                    - source_labels: [instance]
                      target_label: instance
                      replacement: 'ceph_cluster'
                      - url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
                        password: sd_password
                        ca_file: root_cert.pem
                      ca_file: root_cert.pem
                      - url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
                        password: sd_password
                        ca_file: root_cert.pem
                  - job_name: 'haproxy'
                      ca_file: root_cert.pem
                      - url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy
                        password: sd_password
                        ca_file: root_cert.pem
                  - job_name: 'ceph-exporter'
                      ca_file: root_cert.pem
                      - url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
                        password: sd_password
                        ca_file: root_cert.pem
                      ca_file: root_cert.pem
                      - url: https://[::1]:8765/sd/prometheus/sd-config?service=nvmeof
                        password: sd_password
                        ca_file: root_cert.pem
                """).lstrip()

                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "prometheus.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'prometheus.test',
                        "deploy_arguments": [],
                        "meta": {
                            'service_name': 'prometheus',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": {
                                'prometheus.yml': y,
                                'root_cert.pem': '',
                                'mgr_prometheus_cert.pem': '',
                                'web.yml': web_config,
                                'prometheus.crt': 'mycert',
                                'prometheus.key': 'mykey',
                                "/etc/prometheus/alerting/custom_alerts.yml": "",
                            },
                            'retention_time': '15d',
                            'retention_size': '0',
                            'ip_to_bind_to': '',
                            'web_config': '/etc/prometheus/web.yml',
                        },
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, MonitoringSpec('loki')) as _:
                # expected loki.yml; lines elided from the excerpt are omitted
                y = dedent("""
                # This file is generated by cephadm.
                  http_listen_port: 3100
                  grpc_listen_port: 8080
                  path_prefix: /tmp/loki
                    chunks_directory: /tmp/loki/chunks
                    rules_directory: /tmp/loki/rules
                  replication_factor: 1
                      instance_addr: 127.0.0.1
                      store: boltdb-shipper
                      object_store: filesystem
                      object_store: filesystem
                      period: 24h""").lstrip()

                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "loki.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'loki.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [3100],
                        },
                        "meta": {
                            'service_name': 'loki',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": {
                                "loki.yml": y,
                            },
                        },
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, MonitoringSpec('promtail')) as _:
                # expected promtail.yml; lines elided from the excerpt are omitted
                y = dedent("""
                # This file is generated by cephadm.
                  http_listen_port: 9080
                    filename: /tmp/positions.yaml
                    - url: http://:3100/loki/api/v1/push
                        __path__: /var/log/ceph/**/*.log""").lstrip()

                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "promtail.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'promtail.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9080],
                        },
                        "meta": {
                            'service_name': 'promtail',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": {
                                "promtail.yml": y,
                            },
                        },
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
    @patch("cephadm.services.monitoring.verify_tls", lambda *_: None)
    def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(("{}", "", 0))

        with with_host(cephadm_module, "test"):
            cephadm_module.cert_key_store.save_cert('grafana_cert', grafana_cert, host='test')
            cephadm_module.cert_key_store.save_key('grafana_key', grafana_key, host='test')
            with with_service(
                cephadm_module, PrometheusSpec("prometheus")
            ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service(
                cephadm_module, GrafanaSpec("grafana")
            ) as _:
                # expected files; lines elided from the excerpt are omitted
                files = {
                    'grafana.ini': dedent("""
                        # This file is generated by cephadm.
                        default_theme = light
                        org_name = 'Main Org.'
                        domain = 'bootstrap.storage.lab'
                        cert_file = /etc/grafana/certs/cert_file
                        cert_key = /etc/grafana/certs/cert_key
                        external_enabled = false
                        disable_initial_admin_creation = true
                        cookie_secure = true
                        cookie_samesite = none
                        allow_embedding = true""").lstrip(),  # noqa: W291
                    'provisioning/datasources/ceph-dashboard.yml': dedent("""
                        # This file is generated by cephadm.
                        - name: 'Dashboard1'
                        - name: 'Dashboard1'
                          url: 'http://[1::4]:9095'
                          editable: false""").lstrip(),
                    'certs/cert_file': dedent(f"""
                        # generated by cephadm\n{grafana_cert}""").lstrip(),
                    'certs/cert_key': dedent(f"""
                        # generated by cephadm\n{grafana_key}""").lstrip(),
                    'provisioning/dashboards/default.yml': dedent("""
                        # This file is generated by cephadm.
                        - name: 'Ceph Dashboard'
                          disableDeletion: false
                          updateIntervalSeconds: 3
                          path: '/etc/grafana/provisioning/dashboards'""").lstrip(),
                }

                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "grafana.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'grafana.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [3000],
                        },
                        "meta": {
                            'service_name': 'grafana',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "files": files,
                        },
                    }),
                    use_current_daemon_image=False,
                )
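        # The generated certs/cert_file and certs/cert_key embed the fixture
        # PEM data behind a '# generated by cephadm' header, matching what was
        # saved into cert_key_store above.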
    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, GrafanaSpec(initial_admin_password='secure')):
                out = cephadm_module.cephadm_services['grafana'].generate_config(
                    CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
                # expected output; lines elided from the excerpt are omitted
                assert out == ({
                    'files': {
                        'grafana.ini':
                            '# This file is generated by cephadm.\n'
                            ' default_theme = light\n'
                            '[auth.anonymous]\n'
                            " org_name = 'Main Org.'\n"
                            " org_role = 'Viewer'\n"
                            " domain = 'bootstrap.storage.lab'\n"
                            ' protocol = https\n'
                            ' cert_file = /etc/grafana/certs/cert_file\n'
                            ' cert_key = /etc/grafana/certs/cert_key\n'
                            ' http_port = 3000\n'
                            ' external_enabled = false\n'
                            ' admin_user = admin\n'
                            ' admin_password = secure\n'
                            ' cookie_secure = true\n'
                            ' cookie_samesite = none\n'
                            ' allow_embedding = true',
                        'provisioning/datasources/ceph-dashboard.yml':
                            "# This file is generated by cephadm.\n"
                            'deleteDatasources:\n\n'
                            " access: 'proxy'\n"
                            ' basicAuth: false\n'
                            ' isDefault: false\n',
                        'certs/cert_file': ANY,
                        'certs/cert_key': ANY,
                        'provisioning/dashboards/default.yml':
                            '# This file is generated by cephadm.\n'
                            " - name: 'Ceph Dashboard'\n"
                            ' disableDeletion: false\n'
                            ' updateIntervalSeconds: 3\n'
                            ' editable: false\n'
                            " path: '/etc/grafana/provisioning/dashboards'"
                    }}, ['secure_monitoring_stack:False'])
    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_grafana_no_anon_access(self, cephadm_module: CephadmOrchestrator):
        # with anonymous_access set to False, expecting the [auth.anonymous]
        # section to not be present in the grafana config. Note that we require
        # an initial_admin_password to be provided when anonymous_access is False
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, GrafanaSpec(anonymous_access=False, initial_admin_password='secure')):
                out = cephadm_module.cephadm_services['grafana'].generate_config(
                    CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
                # expected output; lines elided from the excerpt are omitted
                assert out == ({
                    'files': {
                        'grafana.ini':
                            '# This file is generated by cephadm.\n'
                            ' default_theme = light\n'
                            " domain = 'bootstrap.storage.lab'\n"
                            ' protocol = https\n'
                            ' cert_file = /etc/grafana/certs/cert_file\n'
                            ' cert_key = /etc/grafana/certs/cert_key\n'
                            ' http_port = 3000\n'
                            ' external_enabled = false\n'
                            ' admin_user = admin\n'
                            ' admin_password = secure\n'
                            ' cookie_secure = true\n'
                            ' cookie_samesite = none\n'
                            ' allow_embedding = true',
                        'provisioning/datasources/ceph-dashboard.yml':
                            "# This file is generated by cephadm.\n"
                            'deleteDatasources:\n\n'
                            " access: 'proxy'\n"
                            ' basicAuth: false\n'
                            ' isDefault: false\n',
                        'certs/cert_file': ANY,
                        'certs/cert_key': ANY,
                        'provisioning/dashboards/default.yml':
                            '# This file is generated by cephadm.\n'
                            " - name: 'Ceph Dashboard'\n"
                            ' disableDeletion: false\n'
                            ' updateIntervalSeconds: 3\n'
                            ' editable: false\n'
                            " path: '/etc/grafana/provisioning/dashboards'"
                    }}, ['secure_monitoring_stack:False'])
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            # placement/spec stanza assumed: port 4200 matches the expected
            # tcp_ports below; those yaml lines were elided from the excerpt
            yaml_str = """service_type: alertmanager
service_name: alertmanager
placement:
    count: 1
spec:
    port: 4200
"""
            yaml_file = yaml.safe_load(yaml_str)
            spec = ServiceSpec.from_json(yaml_file)

            with patch("cephadm.services.monitoring.AlertmanagerService.generate_config", return_value=({}, [])):
                with with_service(cephadm_module, spec):
                    CephadmServe(cephadm_module)._check_daemons()

                    # deploy-call skeleton reconstructed; fields not shown in
                    # the excerpt are omitted
                    _run_cephadm.assert_called_with(
                        'test',
                        "alertmanager.test",
                        ['_orch', 'deploy'],
                        [],
                        stdin=json.dumps({
                            "name": 'alertmanager.test',
                            "deploy_arguments": [],
                            "params": {
                                'tcp_ports': [4200, 9094],
                            },
                            "meta": {
                                'service_name': 'alertmanager',
                                'ports': [4200, 9094],
                                'rank_generation': None,
                                'extra_container_args': None,
                                'extra_entrypoint_args': None,
                            },
                            "config_blobs": {},
                        }),
                        use_current_daemon_image=True,
                    )
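        # use_current_daemon_image=True here because _check_daemons() performs
        # a reconfig of an existing daemon, unlike the fresh deployments above,
        # which pass use_current_daemon_image=False.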
class TestRGWService:

    @pytest.mark.parametrize(
        "frontend, ssl, extra_args, expected",
        [
            ('beast', False, ['tcp_nodelay=1'],
             'beast endpoint=[fd00:fd00:fd00:3000::1]:80 tcp_nodelay=1'),
            ('beast', True, ['tcp_nodelay=0', 'max_header_size=65536'],
             'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo tcp_nodelay=0 max_header_size=65536'),
            ('civetweb', False, [], 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
            ('civetweb', True, None,
             'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'),
        ]
    )
    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, frontend, ssl, extra_args, expected, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'host1'):
            cephadm_module.cache.update_host_networks('host1', {
                'fd00:fd00:fd00:3000::/64': {
                    'if0': ['fd00:fd00:fd00:3000::1']
                }
            })
            s = RGWSpec(service_id="foo",
                        networks=['fd00:fd00:fd00:3000::/64'],
                        ssl=ssl,  # assumed: this spec field was elided but is parametrized above
                        rgw_frontend_type=frontend,
                        rgw_frontend_extra_args=extra_args)
            with with_service(cephadm_module, s) as dds:
                _, f, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': f'client.{dds[0]}',
                    'key': 'rgw_frontends',
                })
                assert f == expected
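    # The spec is rendered into the daemon's 'rgw_frontends' config option, so
    # the test reads it back with 'config get' and compares it against the
    # expected frontend string (note civetweb's '443s' SSL port syntax).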
class TestMonService:

    def test_set_crush_locations(self, cephadm_module: CephadmOrchestrator):
        mgr = FakeMgr()
        mon_service = MonService(mgr)
        mon_spec = ServiceSpec(service_type='mon', crush_locations={'vm-00': ['datacenter=a', 'rack=1'], 'vm-01': ['datacenter=a'], 'vm-02': ['datacenter=b', 'rack=3']})

        mon_daemons = [
            DaemonDescription(daemon_type='mon', daemon_id='vm-00', hostname='vm-00'),
            DaemonDescription(daemon_type='mon', daemon_id='vm-01', hostname='vm-01'),
            DaemonDescription(daemon_type='mon', daemon_id='vm-02', hostname='vm-02')
        ]

        mon_service.set_crush_locations(mon_daemons, mon_spec)
        assert 'vm-00' in mgr.set_mon_crush_locations
        assert mgr.set_mon_crush_locations['vm-00'] == ['datacenter=a', 'rack=1']
        assert 'vm-01' in mgr.set_mon_crush_locations
        assert mgr.set_mon_crush_locations['vm-01'] == ['datacenter=a']
        assert 'vm-02' in mgr.set_mon_crush_locations
        assert mgr.set_mon_crush_locations['vm-02'] == ['datacenter=b', 'rack=3']
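        # FakeMgr's quorum_status fixture reports all mons with blank crush
        # locations, so set_crush_locations() should issue 'mon set_location'
        # for each daemon; FakeMgr records those in set_mon_crush_locations.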
class TestSNMPGateway:

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V2c',  # assumed: elided, but implied by the test name
            snmp_destination='192.168.1.1:162',
            credentials={
                'snmp_community': 'public'
            })

        config = {
            "destination": spec.snmp_destination,
            "snmp_version": spec.snmp_version,
            "snmp_community": spec.credentials.get('snmp_community')
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "snmp-gateway.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'snmp-gateway.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9464],
                        },
                        "meta": {
                            'service_name': 'snmp-gateway',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": config,
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V2c',  # assumed: elided, but implied by the test name
            snmp_destination='192.168.1.1:162',
            credentials={
                'snmp_community': 'public'
            },
            port=9465)  # assumed: matches the expected non-default tcp port below

        config = {
            "destination": spec.snmp_destination,
            "snmp_version": spec.snmp_version,
            "snmp_community": spec.credentials.get('snmp_community')
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "snmp-gateway.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'snmp-gateway.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9465],
                        },
                        "meta": {
                            'service_name': 'snmp-gateway',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": config,
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V3',  # assumed: elided, but implied by the test name
            snmp_destination='192.168.1.1:162',
            engine_id='8000C53F00000000',
            credentials={
                'snmp_v3_auth_username': 'myuser',
                'snmp_v3_auth_password': 'mypassword'
            })

        config = {
            'destination': spec.snmp_destination,
            'snmp_version': spec.snmp_version,
            'snmp_v3_auth_protocol': 'SHA',
            'snmp_v3_auth_username': 'myuser',
            'snmp_v3_auth_password': 'mypassword',
            'snmp_v3_engine_id': '8000C53F00000000'
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "snmp-gateway.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'snmp-gateway.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9464],
                        },
                        "meta": {
                            'service_name': 'snmp-gateway',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": config,
                    }),
                    use_current_daemon_image=False,
                )
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V3',  # assumed: elided, but implied by the test name
            snmp_destination='192.168.1.1:162',
            engine_id='8000C53F00000000',
            auth_protocol='MD5',
            privacy_protocol='AES',
            credentials={
                'snmp_v3_auth_username': 'myuser',
                'snmp_v3_auth_password': 'mypassword',
                'snmp_v3_priv_password': 'mysecret',
            })

        config = {
            'destination': spec.snmp_destination,
            'snmp_version': spec.snmp_version,
            'snmp_v3_auth_protocol': 'MD5',
            'snmp_v3_auth_username': spec.credentials.get('snmp_v3_auth_username'),
            'snmp_v3_auth_password': spec.credentials.get('snmp_v3_auth_password'),
            'snmp_v3_engine_id': '8000C53F00000000',
            'snmp_v3_priv_protocol': spec.privacy_protocol,
            'snmp_v3_priv_password': spec.credentials.get('snmp_v3_priv_password'),
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                # deploy-call skeleton reconstructed; fields not shown in the
                # excerpt are omitted
                _run_cephadm.assert_called_with(
                    'test',
                    "snmp-gateway.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "name": 'snmp-gateway.test',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9464],
                        },
                        "meta": {
                            'service_name': 'snmp-gateway',
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": config,
                    }),
                    use_current_daemon_image=False,
                )
class TestIngressService:

    @pytest.mark.parametrize(
        "enable_haproxy_protocol",
        [False, True],
    )
    @patch("cephadm.inventory.Inventory.get_addr")
    @patch("cephadm.utils.resolve_ip")
    @patch("cephadm.inventory.HostCache.get_daemons_by_service")
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_nfs_multiple_nfs_same_rank(
        self,
        _run_cephadm,
        _get_daemons_by_service,
        _resolve_ip, _get_addr,
        cephadm_module: CephadmOrchestrator,
        enable_haproxy_protocol: bool,
    ):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        def fake_resolve_ip(hostname: str) -> str:
            if hostname == 'host1':
                return '192.168.122.111'
            elif hostname == 'host2':
                return '192.168.122.222'
            return 'xxx.xxx.xxx.xxx'
        _resolve_ip.side_effect = fake_resolve_ip

        def fake_get_addr(hostname: str) -> str:
            return hostname  # body assumed; implied by 'bind host1:9049' below
        _get_addr.side_effect = fake_get_addr

        nfs_service = NFSServiceSpec(
            service_id="foo",  # assumed; implied by the 'nfs.foo' keys below
            placement=PlacementSpec(
                hosts=['host1', 'host2']),
            port=12049,  # assumed; implied by the daemon/backend port below
            enable_haproxy_protocol=enable_haproxy_protocol,
        )

        ispec = IngressSpec(
            service_type='ingress',
            service_id='nfs.foo',
            backend_service='nfs.foo',
            frontend_port=2049,  # assumed; implied by the expected binds below
            monitor_port=9049,
            virtual_ip='192.168.122.100/24',
            monitor_user='admin',
            monitor_password='12345',
            keepalived_password='12345',
            enable_haproxy_protocol=enable_haproxy_protocol,
        )

        cephadm_module.spec_store._specs = {
            'nfs.foo': nfs_service,
            'ingress.nfs.foo': ispec
        }
        cephadm_module.spec_store.spec_created = {
            'nfs.foo': datetime_now(),
            'ingress.nfs.foo': datetime_now()
        }

        # in both test cases we'll do here, we want only the ip
        # for the host1 nfs daemon as we'll end up giving that
        # one higher rank_generation but the same rank as the one
        # on host2
        # expected haproxy.cfg; lines elided from the excerpt are omitted
        haproxy_txt = (
            '# This file is generated by cephadm.\n'
            ' log 127.0.0.1 local2\n'
            ' chroot /var/lib/haproxy\n'
            ' pidfile /var/lib/haproxy/haproxy.pid\n'
            ' stats socket /var/lib/haproxy/stats\n\n'
            ' timeout queue 1m\n'
            ' timeout connect 10s\n'
            ' timeout client 1m\n'
            ' timeout server 1m\n'
            ' timeout check 10s\n'
            ' bind 192.168.122.100:9049\n'
            ' bind host1:9049\n'
            ' stats uri /stats\n'
            ' stats refresh 10s\n'
            ' stats auth admin:12345\n'
            ' http-request use-service prometheus-exporter if { path /metrics }\n'
            ' monitor-uri /health\n\n'
            'frontend frontend\n'
            ' bind 192.168.122.100:2049\n'
            ' default_backend backend\n\n'
            ' hash-type consistent\n'
        )
        if enable_haproxy_protocol:
            haproxy_txt += ' default-server send-proxy-v2\n'
        haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049 check\n'
        haproxy_expected_conf = {
            'files': {'haproxy.cfg': haproxy_txt}
        }

        # verify we get the same cfg regardless of the order in which the nfs daemons are returned
        # in this case both nfs are rank 0, so it should only take the one with rank_generation 1 a.k.a
        # the one on host1
        nfs_daemons = [
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049]),
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049])
        ]
        _get_daemons_by_service.return_value = nfs_daemons

        haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
            CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))

        assert haproxy_generated_conf[0] == haproxy_expected_conf

        # swapping order now, should still pick out the one with the higher rank_generation
        # in this case both nfs are rank 0, so it should only take the one with rank_generation 1 a.k.a
        # the one on host1
        nfs_daemons = [
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049]),
            DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049])
        ]
        _get_daemons_by_service.return_value = nfs_daemons

        haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
            CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))

        assert haproxy_generated_conf[0] == haproxy_expected_conf
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.7'):
            cephadm_module.cache.update_host_networks('test', {
                # network contents assumed from the expected keepalived config
                '1.2.3.0/24': {
                    'if0': ['1.2.3.4']
                }
            })

            # the ingress backend
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ispec = IngressSpec(service_type='ingress',
                                service_id='test',  # assumed; elided in the excerpt
                                backend_service='rgw.foo',
                                frontend_port=8089,  # assumed; implied by the binds below
                                monitor_port=8999,
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                # expected keepalived.conf; elided fragment lines are omitted
                keepalived_expected_conf = {
                    'files': {
                        'keepalived.conf':
                            '# This file is generated by cephadm.\n'
                            'enable_script_security\n    '
                            'script_user root\n'
                            'vrrp_script check_backend {\n    '
                            'script "/usr/bin/curl http://1.2.3.7:8999/health"\n    '
                            'vrrp_instance VI_0 {\n    '
                            'virtual_router_id 50\n    '
                            'authentication {\n    '
                            'auth_pass 12345\n    '
                            'unicast_src_ip 1.2.3.4\n    '
                            'virtual_ipaddress {\n    '
                            '1.2.3.4/32 dev if0\n    '
                            'check_backend\n    }\n'
                    }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                # expected haproxy.cfg; elided fragment lines are omitted
                haproxy_expected_conf = {
                    'files': {
                        'haproxy.cfg':
                            '# This file is generated by cephadm.'
                            '127.0.0.1 local2\n    '
                            'chroot /var/lib/haproxy\n    '
                            'pidfile /var/lib/haproxy/haproxy.pid\n    '
                            'stats socket /var/lib/haproxy/stats\n'
                            'option dontlognull\n    '
                            'option http-server-close\n    '
                            'option forwardfor except 127.0.0.0/8\n    '
                            'option redispatch\n    '
                            'timeout queue 20s\n    '
                            'timeout connect 5s\n    '
                            'timeout http-request 1s\n    '
                            'timeout http-keep-alive 5s\n    '
                            'timeout client 30s\n    '
                            'timeout server 30s\n    '
                            'timeout check 5s\n    '
                            '\nfrontend stats\n    '
                            'bind 1.2.3.4:8999\n    '
                            'bind 1.2.3.7:8999\n    '
                            'stats uri /stats\n    '
                            'stats refresh 10s\n    '
                            'stats auth admin:12345\n    '
                            'http-request use-service prometheus-exporter if { path /metrics }\n    '
                            'monitor-uri /health\n'
                            '\nfrontend frontend\n    '
                            'bind 1.2.3.4:8089\n    '
                            'default_backend backend\n\n'
                            'backend backend\n    '
                            'option forwardfor\n    '
                            'balance static-rr\n    '
                            'option httpchk HEAD / HTTP/1.0\n    '
                            'server '  # server-name prefix assumed; line elided
                            + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100 inter 2s\n'
                    }
                }

                assert haproxy_generated_conf[0] == haproxy_expected_conf
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_ssl_rgw(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            cephadm_module.cache.update_host_networks('test', {
                # network contents assumed from the expected keepalived config
                '1.2.3.0/24': {
                    'if0': ['1.2.3.1']
                }
            })

            # the ingress backend
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast', rgw_frontend_port=443, ssl=True)

            ispec = IngressSpec(service_type='ingress',
                                service_id='test',  # assumed; elided in the excerpt
                                backend_service='rgw.foo',
                                frontend_port=8089,  # assumed; implied by the binds below
                                monitor_port=8999,
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                # expected keepalived.conf; elided fragment lines are omitted
                keepalived_expected_conf = {
                    'files': {
                        'keepalived.conf':
                            '# This file is generated by cephadm.\n'
                            'enable_script_security\n    '
                            'script_user root\n'
                            'vrrp_script check_backend {\n    '
                            'script "/usr/bin/curl http://[1::4]:8999/health"\n    '
                            'vrrp_instance VI_0 {\n    '
                            'virtual_router_id 50\n    '
                            'authentication {\n    '
                            'auth_pass 12345\n    '
                            'unicast_src_ip 1.2.3.1\n    '
                            'virtual_ipaddress {\n    '
                            '1.2.3.4/32 dev if0\n    '
                            'check_backend\n    }\n'
                    }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                # expected haproxy.cfg; elided fragment lines are omitted
                haproxy_expected_conf = {
                    'files': {
                        'haproxy.cfg':
                            '# This file is generated by cephadm.'
                            '127.0.0.1 local2\n    '
                            'chroot /var/lib/haproxy\n    '
                            'pidfile /var/lib/haproxy/haproxy.pid\n    '
                            'stats socket /var/lib/haproxy/stats\n'
                            'option dontlognull\n    '
                            'option http-server-close\n    '
                            'option forwardfor except 127.0.0.0/8\n    '
                            'option redispatch\n    '
                            'timeout queue 20s\n    '
                            'timeout connect 5s\n    '
                            'timeout http-request 1s\n    '
                            'timeout http-keep-alive 5s\n    '
                            'timeout client 30s\n    '
                            'timeout server 30s\n    '
                            'timeout check 5s\n    '
                            '\nfrontend stats\n    '
                            'bind 1.2.3.4:8999\n    '
                            'stats uri /stats\n    '
                            'stats refresh 10s\n    '
                            'stats auth admin:12345\n    '
                            'http-request use-service prometheus-exporter if { path /metrics }\n    '
                            'monitor-uri /health\n'
                            '\nfrontend frontend\n    '
                            'bind 1.2.3.4:8089\n    '
                            'default_backend backend\n\n'
                            'backend backend\n    '
                            'option forwardfor\n    '
                            'default-server ssl\n    '
                            'default-server verify none\n    '
                            'balance static-rr\n    '
                            'option httpchk HEAD / HTTP/1.0\n    '
                            'server '  # server-name prefix assumed; line elided
                            + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100 inter 2s\n'
                    }
                }

                assert haproxy_generated_conf[0] == haproxy_expected_conf
2102 @patch("cephadm.serve.CephadmServe._run_cephadm")
2103 def test_ingress_config_multi_vips(self
, _run_cephadm
, cephadm_module
: CephadmOrchestrator
):
2104 _run_cephadm
.side_effect
= async_side_effect(('{}', '', 0))
2106 with
with_host(cephadm_module
, 'test', addr
='1.2.3.7'):
2107 cephadm_module
.cache
.update_host_networks('test', {
2113 # Check the ingress with multiple VIPs
2114 s
= RGWSpec(service_id
="foo", placement
=PlacementSpec(count
=1),
2115 rgw_frontend_type
='beast')
2117 ispec
= IngressSpec(service_type
='ingress',
2119 backend_service
='rgw.foo',
2122 monitor_user
='admin',
2123 monitor_password
='12345',
2124 keepalived_password
='12345',
2125 virtual_interface_networks
=['1.2.3.0/24'],
2126 virtual_ips_list
=["1.2.3.4/32"])
2127 with
with_service(cephadm_module
, s
) as _
, with_service(cephadm_module
, ispec
) as _
:
2128 # generate the keepalived conf based on the specified spec
2129 # Test with only 1 IP on the list, as it will fail with more VIPS but only one host.
2130 keepalived_generated_conf
= cephadm_module
.cephadm_services
['ingress'].keepalived_generate_config(
2131 CephadmDaemonDeploySpec(host
='test', daemon_id
='ingress', service_name
=ispec
.service_name()))
2133 keepalived_expected_conf
= {
2137 '# This file is generated by cephadm.\n'
2139 'enable_script_security\n '
2140 'script_user root\n'
2142 'vrrp_script check_backend {\n '
2143 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
2148 'vrrp_instance VI_0 {\n '
2152 'virtual_router_id 50\n '
2154 'authentication {\n '
2156 'auth_pass 12345\n '
2158 'unicast_src_ip 1.2.3.1\n '
2161 'virtual_ipaddress {\n '
2162 '1.2.3.4/32 dev if0\n '
2165 'check_backend\n }\n'
2170 # check keepalived config
2171 assert keepalived_generated_conf
[0] == keepalived_expected_conf
2173 # generate the haproxy conf based on the specified spec
2174 haproxy_generated_conf
= cephadm_module
.cephadm_services
['ingress'].haproxy_generate_config(
2175 CephadmDaemonDeploySpec(host
='test', daemon_id
='ingress', service_name
=ispec
.service_name()))
2177 haproxy_expected_conf
= {
2181 '# This file is generated by cephadm.'
2183 '127.0.0.1 local2\n '
2184 'chroot /var/lib/haproxy\n '
2185 'pidfile /var/lib/haproxy/haproxy.pid\n '
2188 'stats socket /var/lib/haproxy/stats\n'
2193 'option dontlognull\n '
2194 'option http-server-close\n '
2195 'option forwardfor except 127.0.0.0/8\n '
2196 'option redispatch\n '
2198 'timeout queue 20s\n '
2199 'timeout connect 5s\n '
2200 'timeout http-request 1s\n '
2201 'timeout http-keep-alive 5s\n '
2202 'timeout client 30s\n '
2203 'timeout server 30s\n '
2204 'timeout check 5s\n '
2206 '\nfrontend stats\n '
2209 'bind 1.2.3.7:8999\n '
2211 'stats uri /stats\n '
2212 'stats refresh 10s\n '
2213 'stats auth admin:12345\n '
2214 'http-request use-service prometheus-exporter if { path /metrics }\n '
2215 'monitor-uri /health\n'
2216 '\nfrontend frontend\n '
2218 'default_backend backend\n\n'
2219 'backend backend\n '
2220 'option forwardfor\n '
2221 'balance static-rr\n '
2222 'option httpchk HEAD / HTTP/1.0\n '
2224 + haproxy_generated_conf
[1][0] + ' 1.2.3.7:80 check weight 100 inter 2s\n'
2228 assert haproxy_generated_conf
[0] == haproxy_expected_conf
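
    # Note: with multiple VIPs on different networks, each VIP is expected to
    # get its own vrrp_instance section (VI_0, VI_1, ...) with a distinct
    # virtual_router_id and a unicast_src_ip taken from the host's address on
    # that VIP's network, as the expected conf in the next test spells out.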
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_keepalive_config_multi_interface_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.1'):
            with with_host(cephadm_module, 'test2', addr='1.2.3.2'):
                cephadm_module.cache.update_host_networks('test', {
                    '1.2.3.0/24': {
                        'if0': ['1.2.3.1']
                    },
                    '100.100.100.0/24': {
                        'if1': ['100.100.100.1']
                    }
                })
                cephadm_module.cache.update_host_networks('test2', {
                    '1.2.3.0/24': {
                        'if0': ['1.2.3.2']
                    },
                    '100.100.100.0/24': {
                        'if1': ['100.100.100.2']
                    }
                })

                # Check the ingress with multiple VIPs
                s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                            rgw_frontend_type='beast')

                ispec = IngressSpec(service_type='ingress',
                                    placement=PlacementSpec(hosts=['test', 'test2']),
                                    backend_service='rgw.foo',
                                    monitor_user='admin',
                                    monitor_password='12345',
                                    keepalived_password='12345',
                                    virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"])
                with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                    keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                        CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                    keepalived_expected_conf = {
                        'files':
                            {
                                'keepalived.conf':
                                    '# This file is generated by cephadm.\n'
                                    'enable_script_security\n '
                                    'script_user root\n'
                                    'vrrp_script check_backend {\n '
                                    'script "/usr/bin/curl http://1.2.3.1:8999/health"\n '
                                    'vrrp_instance VI_0 {\n '
                                    'virtual_router_id 50\n '
                                    'authentication {\n '
                                    'auth_pass 12345\n '
                                    'unicast_src_ip 1.2.3.1\n '
                                    'virtual_ipaddress {\n '
                                    '1.2.3.100/24 dev if0\n '
                                    'check_backend\n }\n'
                                    'vrrp_instance VI_1 {\n '
                                    'virtual_router_id 51\n '
                                    'authentication {\n '
                                    'auth_pass 12345\n '
                                    'unicast_src_ip 100.100.100.1\n '
                                    'virtual_ipaddress {\n '
                                    '100.100.100.100/24 dev if1\n '
                                    'check_backend\n }\n'
                            }
                    }

                    # check keepalived config
                    assert keepalived_generated_conf[0] == keepalived_expected_conf
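
    # Note: the next test relies on cephadm filtering ingress placements by
    # interface availability; only hosts with an interface on every VIP
    # network should end up with a keepalived daemon.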
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_keepalive_interface_host_filtering(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        # we need to make sure keepalive daemons will have an interface
        # on the hosts we deploy them on in order to set up their VIP.
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.1'):
            with with_host(cephadm_module, 'test2', addr='1.2.3.2'):
                with with_host(cephadm_module, 'test3', addr='1.2.3.3'):
                    with with_host(cephadm_module, 'test4', addr='1.2.3.3'):
                        # setup "test" and "test4" to have all the necessary interfaces,
                        # "test2" to have one of them (should still be filtered)
                        # and "test3" to have none of them
                        cephadm_module.cache.update_host_networks('test', {
                            '1.2.3.0/24': {
                                'if0': ['1.2.3.1']
                            },
                            '100.100.100.0/24': {
                                'if1': ['100.100.100.1']
                            }
                        })
                        cephadm_module.cache.update_host_networks('test2', {
                            '1.2.3.0/24': {
                                'if0': ['1.2.3.2']
                            }
                        })
                        cephadm_module.cache.update_host_networks('test4', {
                            '1.2.3.0/24': {
                                'if0': ['1.2.3.4']
                            },
                            '100.100.100.0/24': {
                                'if1': ['100.100.100.4']
                            }
                        })

                        s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                                    rgw_frontend_type='beast')

                        ispec = IngressSpec(service_type='ingress',
                                            placement=PlacementSpec(hosts=['test', 'test2', 'test3', 'test4']),
                                            backend_service='rgw.foo',
                                            monitor_user='admin',
                                            monitor_password='12345',
                                            keepalived_password='12345',
                                            virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"])
                        with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                            # since we're never actually going to refresh the host here,
                            # check the tmp daemons to see what was placed during the apply
                            daemons = cephadm_module.cache._get_tmp_daemons()
                            keepalive_daemons = [d for d in daemons if d.daemon_type == 'keepalived']
                            hosts_deployed_on = [d.hostname for d in keepalive_daemons]
                            assert 'test' in hosts_deployed_on
                            assert 'test2' not in hosts_deployed_on
                            assert 'test3' not in hosts_deployed_on
                            assert 'test4' in hosts_deployed_on
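
    # Note: prepare_create() is expected to populate port_ips on the daemon
    # spec, mapping the haproxy frontend port to the virtual IP, presumably
    # so haproxy binds that port only on the VIP rather than on all
    # addresses. The next test checks that mapping.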
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_haproxy_port_ips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.7'):
            cephadm_module.cache.update_host_networks('test', {
                '1.2.3.0/24': {
                    'if0': ['1.2.3.4/32']
                }
            })

            # Check the ingress with multiple VIPs
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ip = '1.2.3.4'
            frontend_port = 8089

            ispec = IngressSpec(service_type='ingress',
                                backend_service='rgw.foo',
                                frontend_port=frontend_port,
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_ip=f"{ip}/24")
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the haproxy conf based on the specified spec
                haproxy_daemon_spec = cephadm_module.cephadm_services['ingress'].prepare_create(
                    CephadmDaemonDeploySpec(
                        host='test',
                        daemon_type='haproxy',
                        daemon_id='ingress',
                        service_name=ispec.service_name()))

                assert haproxy_daemon_spec.port_ips == {str(frontend_port): ip}
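
    # Note: with keepalive_only=True the ingress service manages only the
    # keepalived daemon; the rendered check script is just /usr/bin/false
    # (there is no haproxy frontend to health-check) and the backend NFS
    # daemon binds the virtual IP directly.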
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
    @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
    @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
    @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
    def test_keepalive_only_nfs_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test', addr='1.2.3.7'):
            cephadm_module.cache.update_host_networks('test', {
                '1.2.3.0/24': {
                    'if0': ['1.2.3.1']
                }
            })

            # Check the ingress with multiple VIPs
            s = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1),
                               virtual_ip='1.2.3.0/24')

            ispec = IngressSpec(service_type='ingress',
                                backend_service='nfs.foo',
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_ip='1.2.3.0/24',
                                keepalive_only=True)
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                nfs_generated_conf, _ = cephadm_module.cephadm_services['nfs'].generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=s.service_name()))
                ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
                assert "Bind_addr = 1.2.3.0/24" in ganesha_conf

                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    'files':
                        {
                            'keepalived.conf':
                                '# This file is generated by cephadm.\n'
                                'enable_script_security\n '
                                'script_user root\n'
                                'vrrp_script check_backend {\n '
                                'script "/usr/bin/false"\n '
                                'vrrp_instance VI_0 {\n '
                                'virtual_router_id 50\n '
                                'authentication {\n '
                                'auth_pass 12345\n '
                                'unicast_src_ip 1.2.3.1\n '
                                'virtual_ipaddress {\n '
                                '1.2.3.0/24 dev if0\n '
                                'check_backend\n }\n'
                        }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf
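
    # Note: in the proxy-protocol test below, HAProxy_Hosts is expected to
    # list each haproxy host's resolved IP plus any additional non-link-local
    # addresses from the host network cache, without duplicating an address
    # that is already known. The backend entry 'nfs.foo.0' presumably comes
    # from the rank-0 nfs daemon with the highest rank_generation, which is
    # why only host1's address appears there.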
    @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
    @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
    @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
    @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
    @patch("cephadm.inventory.Inventory.keys")
    @patch("cephadm.inventory.Inventory.get_addr")
    @patch("cephadm.utils.resolve_ip")
    @patch("cephadm.inventory.HostCache.get_daemons_by_service")
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_nfs_proxy_protocol(
        self,
        _run_cephadm,
        _get_daemons_by_service,
        _resolve_ip,
        _get_addr,
        _inventory_keys,
        cephadm_module: CephadmOrchestrator,
    ):
        """Verify that setting enable_haproxy_protocol for both ingress and
        nfs services sets the desired configuration parameters in both
        the haproxy config and nfs ganesha config.
        """
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        def fake_resolve_ip(hostname: str) -> str:
            if hostname in ('host1', "192.168.122.111"):
                return '192.168.122.111'
            elif hostname in ('host2', '192.168.122.222'):
                return '192.168.122.222'
            raise KeyError(hostname)
        _resolve_ip.side_effect = fake_resolve_ip
        _get_addr.side_effect = fake_resolve_ip

        def fake_keys():
            return ['host1', 'host2']
        _inventory_keys.side_effect = fake_keys

        nfs_service = NFSServiceSpec(
            service_id="foo",
            placement=PlacementSpec(
                count=1,
                hosts=['host1', 'host2']),
            port=12049,
            enable_haproxy_protocol=True,
        )

        ispec = IngressSpec(
            service_type='ingress',
            service_id='nfs.foo',
            backend_service='nfs.foo',
            frontend_port=2049,
            monitor_port=9049,
            virtual_ip='192.168.122.100/24',
            monitor_user='admin',
            monitor_password='12345',
            keepalived_password='12345',
            enable_haproxy_protocol=True,
        )

        cephadm_module.spec_store._specs = {
            'nfs.foo': nfs_service,
            'ingress.nfs.foo': ispec,
        }
        cephadm_module.spec_store.spec_created = {
            'nfs.foo': datetime_now(),
            'ingress.nfs.foo': datetime_now(),
        }

        haproxy_txt = (
            '# This file is generated by cephadm.\n'
            ' log 127.0.0.1 local2\n'
            ' chroot /var/lib/haproxy\n'
            ' pidfile /var/lib/haproxy/haproxy.pid\n'
            ' stats socket /var/lib/haproxy/stats\n\n'
            ' timeout queue 1m\n'
            ' timeout connect 10s\n'
            ' timeout client 1m\n'
            ' timeout server 1m\n'
            ' timeout check 10s\n'
            ' bind 192.168.122.100:9049\n'
            ' bind 192.168.122.111:9049\n'
            ' stats uri /stats\n'
            ' stats refresh 10s\n'
            ' stats auth admin:12345\n'
            ' http-request use-service prometheus-exporter if { path /metrics }\n'
            ' monitor-uri /health\n\n'
            'frontend frontend\n'
            ' bind 192.168.122.100:2049\n'
            ' default_backend backend\n\n'
            ' hash-type consistent\n'
            ' default-server send-proxy-v2\n'
            ' server nfs.foo.0 192.168.122.111:12049 check\n'
        )
        haproxy_expected_conf = {
            'files': {'haproxy.cfg': haproxy_txt}
        }

        nfs_ganesha_txt = (
            "# This file is generated by cephadm.\n"
            'NFS_CORE_PARAM {\n'
            ' Enable_NLM = true;\n'
            ' Enable_RQUOTA = false;\n'
            ' NFS_Port = 2049;\n'
            ' HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;\n'
            ' Delegations = false;\n'
            " RecoveryBackend = 'rados_cluster';\n"
            ' Minor_Versions = 1, 2;\n'
            ' IdmapConf = "/etc/ganesha/idmap.conf";\n'
            ' UserId = "nfs.foo.test.0.0";\n'
            ' nodeid = "nfs.foo.None";\n'
            ' namespace = "foo";\n'
            ' UserId = "nfs.foo.test.0.0";\n'
            '"rados://.nfs/foo/conf-nfs.foo";\n'
            ' cluster = "ceph";\n'
            ' name = "client.nfs.foo.test.0.0-rgw";\n'
            "%url rados://.nfs/foo/conf-nfs.foo"
        )
        nfs_expected_conf = {
            'files': {'ganesha.conf': nfs_ganesha_txt, 'idmap.conf': ''},
            'extra_args': ['-N', 'NIV_EVENT'],
            'keyring': (
                '[client.nfs.foo.test.0.0]\n'
            ),
            'rgw': {
                'keyring': (
                    '[client.nfs.foo.test.0.0-rgw]\n'
                ),
                'user': 'nfs.foo.test.0.0-rgw',
            },
            'userid': 'nfs.foo.test.0.0',
        }

        nfs_daemons = [
            DaemonDescription(
                daemon_type='nfs',
                daemon_id='foo.0.1.host1.qwerty',
                hostname='host1',
            ),
            DaemonDescription(
                daemon_type='nfs',
                daemon_id='foo.0.0.host2.abcdef',
                hostname='host2',
            ),
        ]
        _get_daemons_by_service.return_value = nfs_daemons

        ingress_svc = cephadm_module.cephadm_services['ingress']
        nfs_svc = cephadm_module.cephadm_services['nfs']

        # add host network info to one host to test the behavior of
        # adding all known-good addresses of the host to the list.
        cephadm_module.cache.update_host_networks('host1', {
            # this one is additional
            '10.10.2.0/24': {
                'eth1': ['10.10.2.20']
            },
            # this is redundant and will be skipped
            '192.168.122.0/24': {
                'eth0': ['192.168.122.111']
            },
            # this is a link-local address and will be ignored
            'fe80::/64': {
                'eth0': [
                    "fe80::8cf5:25ff:fe1c:d963",
                    "fe80::c7b:cbff:fef6:7370",
                    "fe80::7201:25a7:390b:d9a7"
                ]
            },
        })

        haproxy_generated_conf, _ = ingress_svc.haproxy_generate_config(
            CephadmDaemonDeploySpec(
                host='host1',
                daemon_id='ingress',
                service_name=ispec.service_name(),
            ))
        assert haproxy_generated_conf == haproxy_expected_conf

        nfs_generated_conf, _ = nfs_svc.generate_config(
            CephadmDaemonDeploySpec(
                host='test',
                daemon_id='foo.test.0.0',
                service_name=nfs_service.service_name(),
            ))
        assert nfs_generated_conf == nfs_expected_conf
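
# Note: deploying a cephfs-mirror service is also expected to enable the
# mgr 'mirroring' module; test_config asserts that the corresponding mon
# command was issued.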
class TestCephFsMirror:
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('cephfs-mirror')):
                cephadm_module.assert_issued_mon_command({
                    'prefix': 'mgr module enable',
                    'module': 'mirroring'
                })
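
# Note: for the tracing services below, the deploy payload's config_blobs
# carry the wiring between components (e.g. elasticsearch_nodes for
# jaeger-query and jaeger-collector, collector_nodes for jaeger-agent).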
class TestJaeger:
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_jaeger_query(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = TracingSpec(es_nodes="192.168.0.1:9200",
                           service_type="jaeger-query")

        config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    "jaeger-query.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "fsid": "fsid",
                        "name": 'jaeger-query.test',
                        "image": '',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [16686],
                        },
                        "meta": {
                            'service_name': 'jaeger-query',
                            'ports': [16686],
                            'ip': None,
                            'deployed_by': [],
                            'rank': None,
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": config,
                    }),
                    use_current_daemon_image=False,
                )
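
    # Note: when a TracingSpec for jaeger-collector has no explicit es_nodes,
    # the elasticsearch_nodes URL is expected to be derived from the deployed
    # elasticsearch daemon's host address via build_url(), as the test below
    # reconstructs for its expected config.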
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        collector_spec = TracingSpec(service_type="jaeger-collector")
        es_spec = TracingSpec(service_type="elasticsearch")
        es_config = {}

        with with_host(cephadm_module, 'test'):
            collector_config = {
                "elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'}
            with with_service(cephadm_module, es_spec):
                _run_cephadm.assert_called_with(
                    'test',
                    "elasticsearch.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "fsid": "fsid",
                        "name": 'elasticsearch.test',
                        "image": '',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9200],
                        },
                        "meta": {
                            'service_name': 'elasticsearch',
                            'ports': [9200],
                            'ip': None,
                            'deployed_by': [],
                            'rank': None,
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": es_config,
                    }),
                    use_current_daemon_image=False,
                )
                with with_service(cephadm_module, collector_spec):
                    _run_cephadm.assert_called_with(
                        'test',
                        "jaeger-collector.test",
                        ['_orch', 'deploy'],
                        [],
                        stdin=json.dumps({
                            "fsid": "fsid",
                            "name": 'jaeger-collector.test',
                            "image": '',
                            "deploy_arguments": [],
                            "params": {
                                'tcp_ports': [14250],
                            },
                            "meta": {
                                'service_name': 'jaeger-collector',
                                'ports': [14250],
                                'ip': None,
                                'deployed_by': [],
                                'rank': None,
                                'rank_generation': None,
                                'extra_container_args': None,
                                'extra_entrypoint_args': None,
                            },
                            "config_blobs": collector_config,
                        }),
                        use_current_daemon_image=False,
                    )
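
    # Note: jaeger-agent has no elasticsearch wiring of its own; its
    # config_blobs point at the collector ('test:14250', matching the
    # collector's tcp port), so in the test below the agent is deployed
    # inside the collector's service context.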
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        collector_spec = TracingSpec(service_type="jaeger-collector", es_nodes="192.168.0.1:9200")
        collector_config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}

        agent_spec = TracingSpec(service_type="jaeger-agent")
        agent_config = {"collector_nodes": "test:14250"}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, collector_spec):
                _run_cephadm.assert_called_with(
                    'test',
                    "jaeger-collector.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "fsid": "fsid",
                        "name": 'jaeger-collector.test',
                        "image": '',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [14250],
                        },
                        "meta": {
                            'service_name': 'jaeger-collector',
                            'ports': [14250],
                            'ip': None,
                            'deployed_by': [],
                            'rank': None,
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": collector_config,
                    }),
                    use_current_daemon_image=False,
                )
                with with_service(cephadm_module, agent_spec):
                    _run_cephadm.assert_called_with(
                        'test',
                        "jaeger-agent.test",
                        ['_orch', 'deploy'],
                        [],
                        stdin=json.dumps({
                            "fsid": "fsid",
                            "name": 'jaeger-agent.test',
                            "image": '',
                            "deploy_arguments": [],
                            "params": {
                                'tcp_ports': [6799],
                            },
                            "meta": {
                                'service_name': 'jaeger-agent',
                                'ports': [6799],
                                'ip': None,
                                'deployed_by': [],
                                'rank': None,
                                'rank_generation': None,
                                'extra_container_args': None,
                                'extra_entrypoint_args': None,
                            },
                            "config_blobs": agent_config,
                        }),
                        use_current_daemon_image=False,
                    )
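
# Note: for custom containers, the CustomContainerSpec fields (image,
# entrypoint, volume mounts, privilege flags, ...) are passed through to the
# daemon via config_blobs essentially verbatim.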
class TestCustomContainer:
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_deploy_custom_container(
        self, _run_cephadm, cephadm_module: CephadmOrchestrator
    ):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = CustomContainerSpec(
            service_id='tsettinu',
            image='quay.io/foobar/barbaz:latest',
            entrypoint='/usr/local/bin/blat.sh',
            ports=[9090],
        )

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    "container.tsettinu.test",
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps({
                        "fsid": "fsid",
                        "name": 'container.tsettinu.test',
                        "image": 'quay.io/foobar/barbaz:latest',
                        "deploy_arguments": [],
                        "params": {
                            'tcp_ports': [9090],
                        },
                        "meta": {
                            'service_name': 'container.tsettinu',
                            'ports': [9090],
                            'ip': None,
                            'deployed_by': [],
                            'rank': None,
                            'rank_generation': None,
                            'extra_container_args': None,
                            'extra_entrypoint_args': None,
                        },
                        "config_blobs": {
                            "image": "quay.io/foobar/barbaz:latest",
                            "entrypoint": "/usr/local/bin/blat.sh",
                            "volume_mounts": {},
                            "privileged": False,
                        },
                    }),
                    use_current_daemon_image=False,
                )
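
    # Note: init container entrypoint_args may be plain strings or
    # {'argument': ...} dicts; in the deploy params they appear flattened to
    # strings, while the stored meta keeps the dict form with an explicit
    # 'split' flag, as the expected payload below shows.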
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_deploy_custom_container_with_init_ctrs(
        self, _run_cephadm, cephadm_module: CephadmOrchestrator
    ):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = CustomContainerSpec(
            service_id='tsettinu',
            image='quay.io/foobar/barbaz:latest',
            entrypoint='/usr/local/bin/blat.sh',
            ports=[9090],
            init_containers=[
                {'entrypoint': '/usr/local/bin/prepare.sh'},
                {
                    'entrypoint': '/usr/local/bin/optimize.sh',
                    'entrypoint_args': [
                        {'argument': '--title=Alpha One'},
                    ],
                },
            ],
        )

        expected = {
            'fsid': 'fsid',
            'name': 'container.tsettinu.test',
            'image': 'quay.io/foobar/barbaz:latest',
            'deploy_arguments': [],
            'params': {
                'tcp_ports': [9090],
                'init_containers': [
                    {'entrypoint': '/usr/local/bin/prepare.sh'},
                    {
                        'entrypoint': '/usr/local/bin/optimize.sh',
                        'entrypoint_args': [
                            '--title=Alpha One',
                        ],
                    },
                ],
            },
            'meta': {
                'service_name': 'container.tsettinu',
                'ports': [9090],
                'ip': None,
                'deployed_by': [],
                'rank': None,
                'rank_generation': None,
                'extra_container_args': None,
                'extra_entrypoint_args': None,
                'init_containers': [
                    {'entrypoint': '/usr/local/bin/prepare.sh'},
                    {
                        'entrypoint': '/usr/local/bin/optimize.sh',
                        'entrypoint_args': [
                            {'argument': '--title=Alpha One', 'split': False},
                        ],
                    },
                ],
            },
            'config_blobs': {
                'image': 'quay.io/foobar/barbaz:latest',
                'entrypoint': '/usr/local/bin/blat.sh',
                'volume_mounts': {},
                'privileged': False,
            },
        }
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    'container.tsettinu.test',
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps(expected),
                    use_current_daemon_image=False,
                )
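
# Note: smb deployments generate their keyring from the cluster's config
# auth entity (client.smb.config.<daemon name>); with the mon mocked out in
# these tests the actual key material is rendered as 'None'.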
class TestSMB:
    @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_deploy_smb(
        self, _run_cephadm, _get_uname, cephadm_module: CephadmOrchestrator
    ):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        _get_uname.return_value = 'tango.briskly'

        spec = SMBSpec(
            cluster_id='foxtrot',
            config_uri='rados://.smb/foxtrot/config.json',
        )

        expected = {
            'fsid': 'fsid',
            'name': 'smb.tango.briskly',
            'image': '',
            'deploy_arguments': [],
            'params': {},
            'meta': {
                'service_name': 'smb',
                'ports': [],
                'ip': None,
                'deployed_by': [],
                'rank': None,
                'rank_generation': None,
                'extra_container_args': None,
                'extra_entrypoint_args': None,
            },
            'config_blobs': {
                'cluster_id': 'foxtrot',
                'config_uri': 'rados://.smb/foxtrot/config.json',
                'keyring': '[client.smb.config.tango.briskly]\nkey = None\n',
                'config_auth_entity': 'client.smb.config.tango.briskly',
            },
        }
        with with_host(cephadm_module, 'hostx'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'hostx',
                    'smb.tango.briskly',
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps(expected),
                    use_current_daemon_image=False,
                )
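
    # Note: join_sources accept both rados object URIs and
    # rados:mon-config-key URIs, and every entity in include_ceph_users is
    # expected to get its own section in the generated keyring blob.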
    @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_deploy_smb_join_dns(
        self, _run_cephadm, _get_uname, cephadm_module: CephadmOrchestrator
    ):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        _get_uname.return_value = 'tango.briskly'

        spec = SMBSpec(
            cluster_id='foxtrot',
            features=['domain'],
            config_uri='rados://.smb/foxtrot/config2.json',
            join_sources=[
                'rados://.smb/foxtrot/join1.json',
                'rados:mon-config-key:smb/config/foxtrot/join2.json',
            ],
            custom_dns=['10.8.88.103'],
            include_ceph_users=[
                'client.smb.fs.cephfs.share1',
                'client.smb.fs.cephfs.share2',
                'client.smb.fs.fs2.share3',
            ],
        )

        expected = {
            'fsid': 'fsid',
            'name': 'smb.tango.briskly',
            'image': '',
            'deploy_arguments': [],
            'params': {},
            'meta': {
                'service_name': 'smb',
                'ports': [],
                'ip': None,
                'deployed_by': [],
                'rank': None,
                'rank_generation': None,
                'extra_container_args': None,
                'extra_entrypoint_args': None,
            },
            'config_blobs': {
                'cluster_id': 'foxtrot',
                'features': ['domain'],
                'config_uri': 'rados://.smb/foxtrot/config2.json',
                'join_sources': [
                    'rados://.smb/foxtrot/join1.json',
                    'rados:mon-config-key:smb/config/foxtrot/join2.json',
                ],
                'custom_dns': ['10.8.88.103'],
                'keyring': (
                    '[client.smb.config.tango.briskly]\nkey = None\n\n'
                    '[client.smb.fs.cephfs.share1]\nkey = None\n\n'
                    '[client.smb.fs.cephfs.share2]\nkey = None\n\n'
                    '[client.smb.fs.fs2.share3]\nkey = None\n'
                ),
                'config_auth_entity': 'client.smb.config.tango.briskly',
            },
        }
        with with_host(cephadm_module, 'hostx'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'hostx',
                    'smb.tango.briskly',
                    ['_orch', 'deploy'],
                    [],
                    stdin=json.dumps(expected),
                    use_current_daemon_image=False,
                )