1 from textwrap import dedent
2 import json
3 import urllib.parse
4 import yaml
5 from mgr_util import build_url
6
7 import pytest
8
9 from unittest.mock import MagicMock, call, patch, ANY
10
11 from cephadm.serve import CephadmServe
12 from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RgwService, \
13 RbdMirrorService, CrashService, CephadmDaemonDeploySpec
14 from cephadm.services.iscsi import IscsiService
15 from cephadm.services.nfs import NFSService
16 from cephadm.services.nvmeof import NvmeofService
17 from cephadm.services.osd import OSDService
18 from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
19 NodeExporterService, LokiService, PromtailService
20 from cephadm.services.smb import SMBSpec
21 from cephadm.module import CephadmOrchestrator
22 from ceph.deployment.service_spec import (
23 AlertManagerSpec,
24 CephExporterSpec,
25 CustomContainerSpec,
26 GrafanaSpec,
27 IngressSpec,
28 IscsiServiceSpec,
29 MonitoringSpec,
30 NFSServiceSpec,
31 NvmeofServiceSpec,
32 PlacementSpec,
33 PrometheusSpec,
34 RGWSpec,
35 SNMPGatewaySpec,
36 ServiceSpec,
37 TracingSpec,
38 )
39 from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect
40
41 from ceph.utils import datetime_now
42
43 from orchestrator import OrchestratorError
44 from orchestrator._interface import DaemonDescription
45
46 from typing import Dict, List
47
48 grafana_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQDIZSujNBlKaLJzmvntjukjMA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzEzMTE0NzA3WhcN\nMzIwNzEwMTE0NzA3WjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyMe4DMA+MeYK7BHZMHB\nq7zjliEOcNgxomjU8qbf5USF7Mqrf6+/87XWqj4pCyAW8x0WXEr6A56a+cmBVmt+\nqtWDzl020aoId6lL5EgLLn6/kMDCCJLq++Lg9cEofMSvcZh+lY2f+1p+C+00xent\nrLXvXGOilAZWaQfojT2BpRnNWWIFbpFwlcKrlg2G0cFjV5c1m6a0wpsQ9JHOieq0\nSvwCixajwq3CwAYuuiU1wjI4oJO4Io1+g8yB3nH2Mo/25SApCxMXuXh4kHLQr/T4\n4hqisvG4uJYgKMcSIrWj5o25mclByGi1UI/kZkCUES94i7Z/3ihx4Bad0AMs/9tw\nFwIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQAf+pwz7Gd7mDwU2LY0TQXsK6/8KGzh\nHuX+ErOb8h5cOAbvCnHjyJFWf6gCITG98k9nxU9NToG0WYuNm/max1y/54f0dtxZ\npUo6KSNl3w6iYCfGOeUIj8isi06xMmeTgMNzv8DYhDt+P2igN6LenqWTVztogkiV\nxQ5ZJFFLEw4sN0CXnrZX3t5ruakxLXLTLKeE0I91YJvjClSBGkVJq26wOKQNHMhx\npWxeydQ5EgPZY+Aviz5Dnxe8aB7oSSovpXByzxURSabOuCK21awW5WJCGNpmqhWK\nZzACBDEstccj57c4OGV0eayHJRsluVr2e9NHRINZA3qdB37e6gsI1xHo\n-----END CERTIFICATE-----\n"""
49
50 grafana_key = """-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLIx7gMwD4x5gr\nsEdkwcGrvOOWIQ5w2DGiaNTypt/lRIXsyqt/r7/ztdaqPikLIBbzHRZcSvoDnpr5\nyYFWa36q1YPOXTbRqgh3qUvkSAsufr+QwMIIkur74uD1wSh8xK9xmH6VjZ/7Wn4L\n7TTF6e2ste9cY6KUBlZpB+iNPYGlGc1ZYgVukXCVwquWDYbRwWNXlzWbprTCmxD0\nkc6J6rRK/AKLFqPCrcLABi66JTXCMjigk7gijX6DzIHecfYyj/blICkLExe5eHiQ\nctCv9PjiGqKy8bi4liAoxxIitaPmjbmZyUHIaLVQj+RmQJQRL3iLtn/eKHHgFp3Q\nAyz/23AXAgMBAAECggEAVoTB3Mm8azlPlaQB9GcV3tiXslSn+uYJ1duCf0sV52dV\nBzKW8s5fGiTjpiTNhGCJhchowqxoaew+o47wmGc2TvqbpeRLuecKrjScD0GkCYyQ\neM2wlshEbz4FhIZdgS6gbuh9WaM1dW/oaZoBNR5aTYo7xYTmNNeyLA/jO2zr7+4W\n5yES1lMSBXpKk7bDGKYY4bsX2b5RLr2Grh2u2bp7hoLABCEvuu8tSQdWXLEXWpXo\njwmV3hc6tabypIa0mj2Dmn2Dmt1ppSO0AZWG/WAizN3f4Z0r/u9HnbVrVmh0IEDw\n3uf2LP5o3msG9qKCbzv3lMgt9mMr70HOKnJ8ohMSKQKBgQDLkNb+0nr152HU9AeJ\nvdz8BeMxcwxCG77iwZphZ1HprmYKvvXgedqWtS6FRU+nV6UuQoPUbQxJBQzrN1Qv\nwKSlOAPCrTJgNgF/RbfxZTrIgCPuK2KM8I89VZv92TSGi362oQA4MazXC8RAWjoJ\nSu1/PHzK3aXOfVNSLrOWvIYeZQKBgQD/dgT6RUXKg0UhmXj7ExevV+c7oOJTDlMl\nvLngrmbjRgPO9VxLnZQGdyaBJeRngU/UXfNgajT/MU8B5fSKInnTMawv/tW7634B\nw3v6n5kNIMIjJmENRsXBVMllDTkT9S7ApV+VoGnXRccbTiDapBThSGd0wri/CuwK\nNWK1YFOeywKBgEDyI/XG114PBUJ43NLQVWm+wx5qszWAPqV/2S5MVXD1qC6zgCSv\nG9NLWN1CIMimCNg6dm7Wn73IM7fzvhNCJgVkWqbItTLG6DFf3/DPODLx1wTMqLOI\nqFqMLqmNm9l1Nec0dKp5BsjRQzq4zp1aX21hsfrTPmwjxeqJZdioqy2VAoGAXR5X\nCCdSHlSlUW8RE2xNOOQw7KJjfWT+WAYoN0c7R+MQplL31rRU7dpm1bLLRBN11vJ8\nMYvlT5RYuVdqQSP6BkrX+hLJNBvOLbRlL+EXOBrVyVxHCkDe+u7+DnC4epbn+N8P\nLYpwqkDMKB7diPVAizIKTBxinXjMu5fkKDs5n+sCgYBbZheYKk5M0sIxiDfZuXGB\nkf4mJdEkTI1KUGRdCwO/O7hXbroGoUVJTwqBLi1tKqLLarwCITje2T200BYOzj82\nqwRkCXGtXPKnxYEEUOiFx9OeDrzsZV00cxsEnX0Zdj+PucQ/J3Cvd0dWUspJfLHJ\n39gnaegswnz9KMQAvzKFdg==\n-----END PRIVATE KEY-----\n"""
51
52
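# Test doubles used throughout this file: FakeInventory and FakeMgr stand in for the real
# cephadm mgr module. FakeMgr routes check_mon_command/mon_command through
# _check_mon_command, which emulates the few mon commands these tests need
# ('get-cmd'/'set-cmd', 'auth get', 'quorum_status', 'mon set_location') and records
# requested crush locations in set_mon_crush_locations.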
53 class FakeInventory:
54 def get_addr(self, name: str) -> str:
55 return '1.2.3.4'
56
57
58 class FakeMgr:
59 def __init__(self):
60 self.config = ''
61 self.set_mon_crush_locations: Dict[str, List[str]] = {}
62 self.check_mon_command = MagicMock(side_effect=self._check_mon_command)
63 self.mon_command = MagicMock(side_effect=self._check_mon_command)
64 self.template = MagicMock()
65 self.log = MagicMock()
66 self.inventory = FakeInventory()
67
68 def _check_mon_command(self, cmd_dict, inbuf=None):
69 prefix = cmd_dict.get('prefix')
70 if prefix == 'get-cmd':
71 return 0, self.config, ''
72 if prefix == 'set-cmd':
73 self.config = cmd_dict.get('value')
74 return 0, 'value set', ''
75 if prefix in ['auth get']:
76 return 0, '[foo]\nkeyring = asdf\n', ''
77 if prefix == 'quorum_status':
78 # actual quorum status output from testing
79 # note in this output all of the mons have blank crush locations
80 return 0, """{"election_epoch": 14, "quorum": [0, 1, 2], "quorum_names": ["vm-00", "vm-01", "vm-02"], "quorum_leader_name": "vm-00", "quorum_age": 101, "features": {"quorum_con": "4540138322906710015", "quorum_mon": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"]}, "monmap": {"epoch": 3, "fsid": "9863e1b8-6f24-11ed-8ad8-525400c13ad2", "modified": "2022-11-28T14:00:29.972488Z", "created": "2022-11-28T13:57:55.847497Z", "min_mon_release": 18, "min_mon_release_name": "reef", "election_strategy": 1, "disallowed_leaders: ": "", "stretch_mode": false, "tiebreaker_mon": "", "features": {"persistent": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"], "optional": []}, "mons": [{"rank": 0, "name": "vm-00", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.61:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.61:6789", "nonce": 0}]}, "addr": "192.168.122.61:6789/0", "public_addr": "192.168.122.61:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 1, "name": "vm-01", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.63:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.63:6789", "nonce": 0}]}, "addr": "192.168.122.63:6789/0", "public_addr": "192.168.122.63:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 2, "name": "vm-02", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.82:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.82:6789", "nonce": 0}]}, "addr": "192.168.122.82:6789/0", "public_addr": "192.168.122.82:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}]}}""", ''
81 if prefix == 'mon set_location':
82 self.set_mon_crush_locations[cmd_dict.get('name')] = cmd_dict.get('args')
83 return 0, '', ''
84 return -1, '', 'error'
85
86 def get_minimal_ceph_conf(self) -> str:
87 return ''
88
89 def get_mgr_ip(self) -> str:
90 return '1.2.3.4'
91
92
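# Behaviour shared by the service classes: pushing a service URL to the dashboard only
# when it changed, and the auth-entity naming rules for each daemon type.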
93 class TestCephadmService:
94 def test_set_service_url_on_dashboard(self):
95 # pylint: disable=protected-access
96 mgr = FakeMgr()
97 service_url = 'http://svc:1000'
98 service = GrafanaService(mgr)
99 service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
100 assert mgr.config == service_url
101
102 # set-cmd should not be called if value doesn't change
103 mgr.check_mon_command.reset_mock()
104 service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
105 mgr.check_mon_command.assert_called_once_with({'prefix': 'get-cmd'})
106
107 def _get_services(self, mgr):
108 # services:
109 osd_service = OSDService(mgr)
110 nfs_service = NFSService(mgr)
111 mon_service = MonService(mgr)
112 mgr_service = MgrService(mgr)
113 mds_service = MdsService(mgr)
114 rgw_service = RgwService(mgr)
115 rbd_mirror_service = RbdMirrorService(mgr)
116 grafana_service = GrafanaService(mgr)
117 alertmanager_service = AlertmanagerService(mgr)
118 prometheus_service = PrometheusService(mgr)
119 node_exporter_service = NodeExporterService(mgr)
120 loki_service = LokiService(mgr)
121 promtail_service = PromtailService(mgr)
122 crash_service = CrashService(mgr)
123 iscsi_service = IscsiService(mgr)
124 nvmeof_service = NvmeofService(mgr)
125 cephadm_services = {
126 'mon': mon_service,
127 'mgr': mgr_service,
128 'osd': osd_service,
129 'mds': mds_service,
130 'rgw': rgw_service,
131 'rbd-mirror': rbd_mirror_service,
132 'nfs': nfs_service,
133 'grafana': grafana_service,
134 'alertmanager': alertmanager_service,
135 'prometheus': prometheus_service,
136 'node-exporter': node_exporter_service,
137 'loki': loki_service,
138 'promtail': promtail_service,
139 'crash': crash_service,
140 'iscsi': iscsi_service,
141 'nvmeof': nvmeof_service,
142 }
143 return cephadm_services
144
145 def test_get_auth_entity(self):
146 mgr = FakeMgr()
147 cephadm_services = self._get_services(mgr)
148
149 for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
150 assert "client.%s.id1" % (daemon_type) == \
151 cephadm_services[daemon_type].get_auth_entity("id1", "host")
152 assert "client.%s.id1" % (daemon_type) == \
153 cephadm_services[daemon_type].get_auth_entity("id1", "")
154 assert "client.%s.id1" % (daemon_type) == \
155 cephadm_services[daemon_type].get_auth_entity("id1")
156
157 assert "client.crash.host" == \
158 cephadm_services["crash"].get_auth_entity("id1", "host")
159 with pytest.raises(OrchestratorError):
160 cephadm_services["crash"].get_auth_entity("id1", "")
161 cephadm_services["crash"].get_auth_entity("id1")
162
163 assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
164 assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
165 assert "mon." == cephadm_services["mon"].get_auth_entity("id1")
166
167 assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "host")
168 assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "")
169 assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1")
170
171 for daemon_type in ["osd", "mds"]:
172 assert "%s.id1" % daemon_type == \
173 cephadm_services[daemon_type].get_auth_entity("id1", "host")
174 assert "%s.id1" % daemon_type == \
175 cephadm_services[daemon_type].get_auth_entity("id1", "")
176 assert "%s.id1" % daemon_type == \
177 cephadm_services[daemon_type].get_auth_entity("id1")
178
179 # services based on CephadmService shouldn't have get_auth_entity
180 with pytest.raises(AttributeError):
181 for daemon_type in ['grafana', 'alertmanager', 'prometheus', 'node-exporter', 'loki', 'promtail']:
182 cephadm_services[daemon_type].get_auth_entity("id1", "host")
183 cephadm_services[daemon_type].get_auth_entity("id1", "")
184 cephadm_services[daemon_type].get_auth_entity("id1")
185
186
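# IscsiService: checks the auth caps requested for a new iscsi daemon, the
# 'dashboard iscsi-gateway-add' calls for IPv4/IPv6 and secure endpoints, and the
# iscsi-gateway.cfg shipped in the deploy payload handed to _run_cephadm.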
187 class TestISCSIService:
188
189 mgr = FakeMgr()
190 iscsi_service = IscsiService(mgr)
191
192 iscsi_spec = IscsiServiceSpec(service_type='iscsi', service_id="a")
193 iscsi_spec.daemon_type = "iscsi"
194 iscsi_spec.daemon_id = "a"
195 iscsi_spec.spec = MagicMock()
196 iscsi_spec.spec.daemon_type = "iscsi"
197 iscsi_spec.spec.ssl_cert = ''
198 iscsi_spec.api_user = "user"
199 iscsi_spec.api_password = "password"
200 iscsi_spec.api_port = 5000
201 iscsi_spec.api_secure = False
202 iscsi_spec.ssl_cert = "cert"
203 iscsi_spec.ssl_key = "key"
204
205 mgr.spec_store = MagicMock()
206 mgr.spec_store.all_specs.get.return_value = iscsi_spec
207
208 def test_iscsi_client_caps(self):
209
210 iscsi_daemon_spec = CephadmDaemonDeploySpec(
211 host='host', daemon_id='a', service_name=self.iscsi_spec.service_name())
212
213 self.iscsi_service.prepare_create(iscsi_daemon_spec)
214
215 expected_caps = ['mon',
216 'profile rbd, allow command "osd blocklist", allow command "config-key get" with "key" prefix "iscsi/"',
217 'mgr', 'allow command "service status"',
218 'osd', 'allow rwx']
219
220 expected_call = call({'prefix': 'auth get-or-create',
221 'entity': 'client.iscsi.a',
222 'caps': expected_caps})
223 expected_call2 = call({'prefix': 'auth caps',
224 'entity': 'client.iscsi.a',
225 'caps': expected_caps})
226 expected_call3 = call({'prefix': 'auth get',
227 'entity': 'client.iscsi.a'})
228
229 assert expected_call in self.mgr.mon_command.mock_calls
230 assert expected_call2 in self.mgr.mon_command.mock_calls
231 assert expected_call3 in self.mgr.mon_command.mock_calls
232
233 @patch('cephadm.utils.resolve_ip')
234 def test_iscsi_dashboard_config(self, mock_resolve_ip):
235
236 self.mgr.check_mon_command = MagicMock()
237 self.mgr.check_mon_command.return_value = ('', '{"gateways": {}}', '')
238
239 # Case 1: use IPv4 address
240 id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
241 daemon_id="a", ip='192.168.1.1')
242 daemon_list = [id1]
243 mock_resolve_ip.return_value = '192.168.1.1'
244
245 self.iscsi_service.config_dashboard(daemon_list)
246
247 dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
248 'name': 'testhost1'},
249 'http://user:password@192.168.1.1:5000')
250
251 assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
252
253 # Case 2: use IPv6 address
254 self.mgr.check_mon_command.reset_mock()
255
256 id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
257 daemon_id="a", ip='FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')
258 mock_resolve_ip.return_value = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'
259
260 self.iscsi_service.config_dashboard(daemon_list)
261
262 dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
263 'name': 'testhost1'},
264 'http://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')
265
266 assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
267
268 # Case 3: IPv6 address, secure protocol
269 self.mgr.check_mon_command.reset_mock()
270
271 self.iscsi_spec.api_secure = True
272
273 self.iscsi_service.config_dashboard(daemon_list)
274
275 dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
276 'name': 'testhost1'},
277 'https://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')
278
279 assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
280
281 @patch("cephadm.serve.CephadmServe._run_cephadm")
282 @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
283 @patch("cephadm.services.iscsi.IscsiService.get_trusted_ips")
284 def test_iscsi_config(self, _get_trusted_ips, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):
285
286 iscsi_daemon_id = 'testpool.test.qwert'
287 trusted_ips = '1.1.1.1,2.2.2.2'
288 api_port = 3456
289 api_user = 'test-user'
290 api_password = 'test-password'
291 pool = 'testpool'
292 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
293 _get_name.return_value = iscsi_daemon_id
294 _get_trusted_ips.return_value = trusted_ips
295
296 iscsi_gateway_conf = f"""# This file is generated by cephadm.
297 [config]
298 cluster_client_name = client.iscsi.{iscsi_daemon_id}
299 pool = {pool}
300 trusted_ip_list = {trusted_ips}
301 minimum_gateways = 1
302 api_port = {api_port}
303 api_user = {api_user}
304 api_password = {api_password}
305 api_secure = False
306 log_to_stderr = True
307 log_to_stderr_prefix = debug
308 log_to_file = False"""
309
310 with with_host(cephadm_module, 'test'):
311 with with_service(cephadm_module, IscsiServiceSpec(service_id=pool,
312 api_port=api_port,
313 api_user=api_user,
314 api_password=api_password,
315 pool=pool,
316 trusted_ip_list=trusted_ips)):
317 _run_cephadm.assert_called_with(
318 'test',
319 f'iscsi.{iscsi_daemon_id}',
320 ['_orch', 'deploy'],
321 [],
322 stdin=json.dumps({
323 "fsid": "fsid",
324 "name": f'iscsi.{iscsi_daemon_id}',
325 "image": '',
326 "deploy_arguments": [],
327 "params": {
328 'tcp_ports': [api_port],
329 },
330 "meta": {
331 'service_name': f'iscsi.{pool}',
332 'ports': [api_port],
333 'ip': None,
334 'deployed_by': [],
335 'rank': None,
336 'rank_generation': None,
337 'extra_container_args': None,
338 'extra_entrypoint_args': None,
339 },
340 "config_blobs": {
341 "config": "",
342 "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n",
343 "files": {
344 "iscsi-gateway.cfg": iscsi_gateway_conf,
345 },
346 }
347 }),
348 use_current_daemon_image=False,
349 )
350
351
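# NvmeofService: test_nvmeof_config renders the expected ceph-nvmeof.conf and asserts it is
# shipped verbatim in the deploy payload (with the default tcp_ports 5500, 4420 and 8009);
# the client-caps and dashboard tests are currently placeholders.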
352 class TestNVMEOFService:
353
354 mgr = FakeMgr()
355 nvmeof_service = NvmeofService(mgr)
356
357 nvmeof_spec = NvmeofServiceSpec(service_type='nvmeof', service_id="a")
358 nvmeof_spec.daemon_type = 'nvmeof'
359 nvmeof_spec.daemon_id = "a"
360 nvmeof_spec.spec = MagicMock()
361 nvmeof_spec.spec.daemon_type = 'nvmeof'
362
363 mgr.spec_store = MagicMock()
364 mgr.spec_store.all_specs.get.return_value = nvmeof_spec
365
366 def test_nvmeof_client_caps(self):
367 pass
368
369 @patch('cephadm.utils.resolve_ip')
370 def test_nvmeof_dashboard_config(self, mock_resolve_ip):
371 pass
372
373 @patch("cephadm.inventory.Inventory.get_addr", lambda _, __: '192.168.100.100')
374 @patch("cephadm.serve.CephadmServe._run_cephadm")
375 @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
376 def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):
377
378 nvmeof_daemon_id = 'testpool.test.qwert'
379 pool = 'testpool'
380 tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288'
381 default_port = 5500
382 group = 'mygroup'
383 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
384 _get_name.return_value = nvmeof_daemon_id
385
386 nvmeof_gateway_conf = f"""# This file is generated by cephadm.
387 [gateway]
388 name = client.nvmeof.{nvmeof_daemon_id}
389 group = {group}
390 addr = 192.168.100.100
391 port = {default_port}
392 enable_auth = False
393 state_update_notify = True
394 state_update_interval_sec = 5
395 enable_spdk_discovery_controller = False
396 enable_prometheus_exporter = True
397 prometheus_exporter_ssl = False
398 prometheus_port = 10008
399 verify_nqns = True
400 omap_file_lock_duration = 20
401 omap_file_lock_retries = 30
402 omap_file_lock_retry_sleep_interval = 1.0
403 omap_file_update_reloads = 10
404 allowed_consecutive_spdk_ping_failures = 1
405 spdk_ping_interval_in_seconds = 2.0
406 ping_spdk_under_lock = False
407 enable_monitor_client = False
408
409 [gateway-logs]
410 log_level = INFO
411 log_files_enabled = True
412 log_files_rotation_enabled = True
413 verbose_log_messages = True
414 max_log_file_size_in_mb = 10
415 max_log_files_count = 20
416 max_log_directory_backups = 10
417 log_directory = /var/log/ceph/
418
419 [discovery]
420 addr = 192.168.100.100
421 port = 8009
422
423 [ceph]
424 pool = {pool}
425 config_file = /etc/ceph/ceph.conf
426 id = nvmeof.{nvmeof_daemon_id}
427
428 [mtls]
429 server_key = /server.key
430 client_key = /client.key
431 server_cert = /server.cert
432 client_cert = /client.cert
433 root_ca_cert = /root.ca.cert
434
435 [spdk]
436 tgt_path = /usr/local/bin/nvmf_tgt
437 rpc_socket_dir = /var/tmp/
438 rpc_socket_name = spdk.sock
439 timeout = 60.0
440 bdevs_per_cluster = 32
441 log_level = WARNING
442 conn_retries = 10
443 transports = tcp
444 transport_tcp_options = {{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7}}
445 tgt_cmd_extra_args = {tgt_cmd_extra_args}
446
447 [monitor]
448 timeout = 1.0\n"""
449
450 with with_host(cephadm_module, 'test'):
451 with with_service(cephadm_module, NvmeofServiceSpec(service_id=pool,
452 tgt_cmd_extra_args=tgt_cmd_extra_args,
453 group=group,
454 pool=pool)):
455 _run_cephadm.assert_called_with(
456 'test',
457 f'nvmeof.{nvmeof_daemon_id}',
458 ['_orch', 'deploy'],
459 [],
460 stdin=json.dumps({
461 "fsid": "fsid",
462 "name": "nvmeof.testpool.test.qwert",
463 "image": "",
464 "deploy_arguments": [],
465 "params": {
466 "tcp_ports": [5500, 4420, 8009]
467 },
468 "meta": {
469 "service_name": "nvmeof.testpool",
470 "ports": [5500, 4420, 8009],
471 "ip": None,
472 "deployed_by": [],
473 "rank": None,
474 "rank_generation": None,
475 "extra_container_args": None,
476 "extra_entrypoint_args": None
477 },
478 "config_blobs": {
479 "config": "",
480 "keyring": "[client.nvmeof.testpool.test.qwert]\nkey = None\n",
481 "files": {
482 "ceph-nvmeof.conf": nvmeof_gateway_conf
483 }
484 }
485 }),
486 use_current_daemon_image=False,
487 )
488
489
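# Config generation for the monitoring stack (alertmanager, prometheus, loki, promtail and
# grafana), with and without secure_monitoring_stack, verified by comparing the full JSON
# payload handed to CephadmServe._run_cephadm against the expected rendered files.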
490 class TestMonitoring:
491 def _get_config(self, url: str) -> str:
492
493 return f"""
494 # This file is generated by cephadm.
495 # See https://prometheus.io/docs/alerting/configuration/ for documentation.
496
497 global:
498 resolve_timeout: 5m
499 http_config:
500 tls_config:
501 insecure_skip_verify: true
502
503 route:
504 receiver: 'default'
505 routes:
506 - group_by: ['alertname']
507 group_wait: 10s
508 group_interval: 10s
509 repeat_interval: 1h
510 receiver: 'ceph-dashboard'
511
512 receivers:
513 - name: 'default'
514 webhook_configs:
515 - name: 'ceph-dashboard'
516 webhook_configs:
517 - url: '{url}/api/prometheus_receiver'
518 """
519
520 @pytest.mark.parametrize(
521 "dashboard_url,expected_yaml_url",
522 [
523 # loopback address
524 ("http://[::1]:8080", "http://localhost:8080"),
525 # IPv6
526 (
527 "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
528 "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
529 ),
530 # IPv6 to FQDN
531 (
532 "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
533 "http://mgr.fqdn.test:8080",
534 ),
535 # IPv4
536 (
537 "http://192.168.0.123:8080",
538 "http://192.168.0.123:8080",
539 ),
540 # IPv4 to FQDN
541 (
542 "http://192.168.0.123:8080",
543 "http://mgr.fqdn.test:8080",
544 ),
545 ],
546 )
547 @patch("cephadm.serve.CephadmServe._run_cephadm")
548 @patch("mgr_module.MgrModule.get")
549 @patch("socket.getfqdn")
550 def test_alertmanager_config(
551 self,
552 mock_getfqdn,
553 mock_get,
554 _run_cephadm,
555 cephadm_module: CephadmOrchestrator,
556 dashboard_url,
557 expected_yaml_url,
558 ):
559 _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
560 mock_get.return_value = {"services": {"dashboard": dashboard_url}}
561 purl = urllib.parse.urlparse(expected_yaml_url)
562 mock_getfqdn.return_value = purl.hostname
563
564 with with_host(cephadm_module, "test"):
565 with with_service(cephadm_module, AlertManagerSpec()):
566 y = dedent(self._get_config(expected_yaml_url)).lstrip()
567 _run_cephadm.assert_called_with(
568 'test',
569 "alertmanager.test",
570 ['_orch', 'deploy'],
571 [],
572 stdin=json.dumps({
573 "fsid": "fsid",
574 "name": 'alertmanager.test',
575 "image": '',
576 "deploy_arguments": [],
577 "params": {
578 'tcp_ports': [9093, 9094],
579 },
580 "meta": {
581 'service_name': 'alertmanager',
582 'ports': [9093, 9094],
583 'ip': None,
584 'deployed_by': [],
585 'rank': None,
586 'rank_generation': None,
587 'extra_container_args': None,
588 'extra_entrypoint_args': None,
589 },
590 "config_blobs": {
591 "files": {
592 "alertmanager.yml": y,
593 },
594 "peers": [],
595 }
596 }),
597 use_current_daemon_image=False,
598 )
599
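# With secure_monitoring_stack enabled, alertmanager should additionally receive a TLS
# cert/key pair, a web.yml enabling TLS and basic auth (credentials from the mgr store,
# password_hash mocked), and root_cert.pem; the cert/key must also be stored in
# cert_key_store.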
600 @patch("cephadm.serve.CephadmServe._run_cephadm")
601 @patch("socket.getfqdn")
602 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
603 @patch("cephadm.services.monitoring.password_hash", lambda password: 'alertmanager_password_hash')
604 def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator):
605 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
606
607 fqdn = 'host1.test'
608 _get_fqdn.return_value = fqdn
609
610 def gen_cert(host, addr):
611 return ('mycert', 'mykey')
612
613 def get_root_cert():
614 return 'my_root_cert'
615
616 with with_host(cephadm_module, 'test'):
617 cephadm_module.secure_monitoring_stack = True
618 cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user')
619 cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password')
620 cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert)
621 cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert)
622 with with_service(cephadm_module, AlertManagerSpec()):
623
624 y = dedent(f"""
625 # This file is generated by cephadm.
626 # See https://prometheus.io/docs/alerting/configuration/ for documentation.
627
628 global:
629 resolve_timeout: 5m
630 http_config:
631 tls_config:
632 ca_file: root_cert.pem
633
634 route:
635 receiver: 'default'
636 routes:
637 - group_by: ['alertname']
638 group_wait: 10s
639 group_interval: 10s
640 repeat_interval: 1h
641 receiver: 'ceph-dashboard'
642
643 receivers:
644 - name: 'default'
645 webhook_configs:
646 - name: 'ceph-dashboard'
647 webhook_configs:
648 - url: 'http://{fqdn}:8080/api/prometheus_receiver'
649 """).lstrip()
650
651 web_config = dedent("""
652 tls_server_config:
653 cert_file: alertmanager.crt
654 key_file: alertmanager.key
655 basic_auth_users:
656 alertmanager_user: alertmanager_password_hash""").lstrip()
657
658 _run_cephadm.assert_called_with(
659 'test',
660 "alertmanager.test",
661 ['_orch', 'deploy'],
662 [],
663 stdin=json.dumps({
664 "fsid": "fsid",
665 "name": 'alertmanager.test',
666 "image": '',
667 "deploy_arguments": [],
668 "params": {
669 'tcp_ports': [9093, 9094],
670 },
671 "meta": {
672 'service_name': 'alertmanager',
673 'ports': [9093, 9094],
674 'ip': None,
675 'deployed_by': [],
676 'rank': None,
677 'rank_generation': None,
678 'extra_container_args': None,
679 'extra_entrypoint_args': None,
680 },
681 "config_blobs": {
682 "files": {
683 "alertmanager.yml": y,
684 'alertmanager.crt': 'mycert',
685 'alertmanager.key': 'mykey',
686 'web.yml': web_config,
687 'root_cert.pem': 'my_root_cert'
688 },
689 'peers': [],
690 'web_config': '/etc/alertmanager/web.yml',
691 }
692 }),
693 use_current_daemon_image=False,
694 )
695
696 assert cephadm_module.cert_key_store.get_cert('alertmanager_cert', host='test') == 'mycert'
697 assert cephadm_module.cert_key_store.get_key('alertmanager_key', host='test') == 'mykey'
698
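# Prometheus config with security disabled: plain http scrape and service-discovery targets.
# only_bind_port_on_networks=True should surface as port_ips/ip_to_bind_to pointing at the
# 1.2.3.0/24 address configured for host 'test'.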
699 @patch("cephadm.serve.CephadmServe._run_cephadm")
700 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
701 def test_prometheus_config_security_disabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
702 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
703 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
704 with with_host(cephadm_module, 'test'):
705 # host "test" needs to have networks for keepalive to be placed
706 cephadm_module.cache.update_host_networks('test', {
707 '1.2.3.0/24': {
708 'if0': ['1.2.3.1']
709 },
710 })
711 with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
712 with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \
713 with_service(cephadm_module, s) as _, \
714 with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
715 with_service(cephadm_module, IngressSpec(service_id='ingress',
716 frontend_port=8089,
717 monitor_port=8999,
718 monitor_user='admin',
719 monitor_password='12345',
720 keepalived_password='12345',
721 virtual_ip="1.2.3.4/32",
722 backend_service='rgw.foo')) as _, \
723 with_service(cephadm_module, PrometheusSpec('prometheus',
724 networks=['1.2.3.0/24'],
725 only_bind_port_on_networks=True)) as _:
726
727 y = dedent("""
728 # This file is generated by cephadm.
729 global:
730 scrape_interval: 10s
731 evaluation_interval: 10s
732 external_labels:
733 cluster: fsid
734
735 rule_files:
736 - /etc/prometheus/alerting/*
737
738 alerting:
739 alertmanagers:
740 - scheme: http
741 http_sd_configs:
742 - url: http://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
743
744 scrape_configs:
745 - job_name: 'ceph'
746 honor_labels: true
747 relabel_configs:
748 - source_labels: [__address__]
749 target_label: cluster
750 replacement: fsid
751 - source_labels: [instance]
752 target_label: instance
753 replacement: 'ceph_cluster'
754 http_sd_configs:
755 - url: http://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
756
757 - job_name: 'node'
758 http_sd_configs:
759 - url: http://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
760 relabel_configs:
761 - source_labels: [__address__]
762 target_label: cluster
763 replacement: fsid
764
765 - job_name: 'haproxy'
766 http_sd_configs:
767 - url: http://[::1]:8765/sd/prometheus/sd-config?service=haproxy
768 relabel_configs:
769 - source_labels: [__address__]
770 target_label: cluster
771 replacement: fsid
772
773 - job_name: 'ceph-exporter'
774 honor_labels: true
775 relabel_configs:
776 - source_labels: [__address__]
777 target_label: cluster
778 replacement: fsid
779 http_sd_configs:
780 - url: http://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
781
782 - job_name: 'nvmeof'
783 http_sd_configs:
784 - url: http://[::1]:8765/sd/prometheus/sd-config?service=nvmeof
785
786 - job_name: 'federate'
787 scrape_interval: 15s
788 honor_labels: true
789 metrics_path: '/federate'
790 params:
791 'match[]':
792 - '{job="ceph"}'
793 - '{job="node"}'
794 - '{job="haproxy"}'
795 - '{job="ceph-exporter"}'
796 static_configs:
797 - targets: []
798 """).lstrip()
799
800 _run_cephadm.assert_called_with(
801 'test',
802 "prometheus.test",
803 ['_orch', 'deploy'],
804 [],
805 stdin=json.dumps({
806 "fsid": "fsid",
807 "name": 'prometheus.test',
808 "image": '',
809 "deploy_arguments": [],
810 "params": {
811 'tcp_ports': [9095],
812 'port_ips': {'8765': '1.2.3.1'}
813 },
814 "meta": {
815 'service_name': 'prometheus',
816 'ports': [9095],
817 'ip': '1.2.3.1',
818 'deployed_by': [],
819 'rank': None,
820 'rank_generation': None,
821 'extra_container_args': None,
822 'extra_entrypoint_args': None,
823 },
824 "config_blobs": {
825 "files": {
826 "prometheus.yml": y,
827 "/etc/prometheus/alerting/custom_alerts.yml": "",
828 },
829 'retention_time': '15d',
830 'retention_size': '0',
831 'ip_to_bind_to': '1.2.3.1',
832 },
833 }),
834 use_current_daemon_image=False,
835 )
836
837 @patch("cephadm.serve.CephadmServe._run_cephadm")
838 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
839 @patch("cephadm.services.monitoring.password_hash", lambda password: 'prometheus_password_hash')
840 def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
841 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
842 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
843
844 def gen_cert(host, addr):
845 return ('mycert', 'mykey')
846
847 with with_host(cephadm_module, 'test'):
848 cephadm_module.secure_monitoring_stack = True
849 cephadm_module.set_store(PrometheusService.USER_CFG_KEY, 'prometheus_user')
850 cephadm_module.set_store(PrometheusService.PASS_CFG_KEY, 'prometheus_plain_password')
851 cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user')
852 cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password')
853 cephadm_module.http_server.service_discovery.username = 'sd_user'
854 cephadm_module.http_server.service_discovery.password = 'sd_password'
855 cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(
856 side_effect=gen_cert)
857 # host "test" needs to have networks for keepalive to be placed
858 cephadm_module.cache.update_host_networks('test', {
859 '1.2.3.0/24': {
860 'if0': ['1.2.3.1']
861 },
862 })
863 with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
864 with_service(cephadm_module, s) as _, \
865 with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
866 with_service(cephadm_module, IngressSpec(service_id='ingress',
867 frontend_port=8089,
868 monitor_port=8999,
869 monitor_user='admin',
870 monitor_password='12345',
871 keepalived_password='12345',
872 virtual_ip="1.2.3.4/32",
873 backend_service='rgw.foo')) as _, \
874 with_service(cephadm_module, PrometheusSpec('prometheus')) as _:
875
876 web_config = dedent("""
877 tls_server_config:
878 cert_file: prometheus.crt
879 key_file: prometheus.key
880 basic_auth_users:
881 prometheus_user: prometheus_password_hash""").lstrip()
882
883 y = dedent("""
884 # This file is generated by cephadm.
885 global:
886 scrape_interval: 10s
887 evaluation_interval: 10s
888
889 rule_files:
890 - /etc/prometheus/alerting/*
891
892 alerting:
893 alertmanagers:
894 - scheme: https
895 basic_auth:
896 username: alertmanager_user
897 password: alertmanager_plain_password
898 tls_config:
899 ca_file: root_cert.pem
900 http_sd_configs:
901 - url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
902 basic_auth:
903 username: sd_user
904 password: sd_password
905 tls_config:
906 ca_file: root_cert.pem
907
908 scrape_configs:
909 - job_name: 'ceph'
910 scheme: https
911 tls_config:
912 ca_file: mgr_prometheus_cert.pem
913 honor_labels: true
914 relabel_configs:
915 - source_labels: [instance]
916 target_label: instance
917 replacement: 'ceph_cluster'
918 http_sd_configs:
919 - url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
920 basic_auth:
921 username: sd_user
922 password: sd_password
923 tls_config:
924 ca_file: root_cert.pem
925
926 - job_name: 'node'
927 scheme: https
928 tls_config:
929 ca_file: root_cert.pem
930 http_sd_configs:
931 - url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
932 basic_auth:
933 username: sd_user
934 password: sd_password
935 tls_config:
936 ca_file: root_cert.pem
937
938 - job_name: 'haproxy'
939 scheme: https
940 tls_config:
941 ca_file: root_cert.pem
942 http_sd_configs:
943 - url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy
944 basic_auth:
945 username: sd_user
946 password: sd_password
947 tls_config:
948 ca_file: root_cert.pem
949
950 - job_name: 'ceph-exporter'
951 honor_labels: true
952 scheme: https
953 tls_config:
954 ca_file: root_cert.pem
955 http_sd_configs:
956 - url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
957 basic_auth:
958 username: sd_user
959 password: sd_password
960 tls_config:
961 ca_file: root_cert.pem
962
963 - job_name: 'nvmeof'
964 honor_labels: true
965 scheme: https
966 tls_config:
967 ca_file: root_cert.pem
968 http_sd_configs:
969 - url: https://[::1]:8765/sd/prometheus/sd-config?service=nvmeof
970 basic_auth:
971 username: sd_user
972 password: sd_password
973 tls_config:
974 ca_file: root_cert.pem
975
976 """).lstrip()
977
978 _run_cephadm.assert_called_with(
979 'test',
980 "prometheus.test",
981 ['_orch', 'deploy'],
982 [],
983 stdin=json.dumps({
984 "fsid": "fsid",
985 "name": 'prometheus.test',
986 "image": '',
987 "deploy_arguments": [],
988 "params": {
989 'tcp_ports': [9095],
990 },
991 "meta": {
992 'service_name': 'prometheus',
993 'ports': [9095],
994 'ip': None,
995 'deployed_by': [],
996 'rank': None,
997 'rank_generation': None,
998 'extra_container_args': None,
999 'extra_entrypoint_args': None,
1000 },
1001 "config_blobs": {
1002 'files': {
1003 'prometheus.yml': y,
1004 'root_cert.pem': '',
1005 'mgr_prometheus_cert.pem': '',
1006 'web.yml': web_config,
1007 'prometheus.crt': 'mycert',
1008 'prometheus.key': 'mykey',
1009 "/etc/prometheus/alerting/custom_alerts.yml": "",
1010 },
1011 'retention_time': '15d',
1012 'retention_size': '0',
1013 'ip_to_bind_to': '',
1014 'web_config': '/etc/prometheus/web.yml',
1015 },
1016 }),
1017 use_current_daemon_image=False,
1018 )
1019
1020 @patch("cephadm.serve.CephadmServe._run_cephadm")
1021 def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1022 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1023
1024 with with_host(cephadm_module, 'test'):
1025 with with_service(cephadm_module, MonitoringSpec('loki')) as _:
1026
1027 y = dedent("""
1028 # This file is generated by cephadm.
1029 auth_enabled: false
1030
1031 server:
1032 http_listen_port: 3100
1033 grpc_listen_port: 8080
1034
1035 common:
1036 path_prefix: /tmp/loki
1037 storage:
1038 filesystem:
1039 chunks_directory: /tmp/loki/chunks
1040 rules_directory: /tmp/loki/rules
1041 replication_factor: 1
1042 ring:
1043 instance_addr: 127.0.0.1
1044 kvstore:
1045 store: inmemory
1046
1047 schema_config:
1048 configs:
1049 - from: 2020-10-24
1050 store: boltdb-shipper
1051 object_store: filesystem
1052 schema: v11
1053 index:
1054 prefix: index_
1055 period: 24h
1056 - from: 2024-05-03
1057 store: tsdb
1058 object_store: filesystem
1059 schema: v13
1060 index:
1061 prefix: index_
1062 period: 24h""").lstrip()
1063
1064 _run_cephadm.assert_called_with(
1065 'test',
1066 "loki.test",
1067 ['_orch', 'deploy'],
1068 [],
1069 stdin=json.dumps({
1070 "fsid": "fsid",
1071 "name": 'loki.test',
1072 "image": '',
1073 "deploy_arguments": [],
1074 "params": {
1075 'tcp_ports': [3100],
1076 },
1077 "meta": {
1078 'service_name': 'loki',
1079 'ports': [3100],
1080 'ip': None,
1081 'deployed_by': [],
1082 'rank': None,
1083 'rank_generation': None,
1084 'extra_container_args': None,
1085 'extra_entrypoint_args': None,
1086 },
1087 "config_blobs": {
1088 "files": {
1089 "loki.yml": y
1090 },
1091 },
1092 }),
1093 use_current_daemon_image=False,
1094 )
1095
1096 @patch("cephadm.serve.CephadmServe._run_cephadm")
1097 def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1098 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1099
1100 with with_host(cephadm_module, 'test'):
1101 with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
1102 with_service(cephadm_module, MonitoringSpec('promtail')) as _:
1103
1104 y = dedent("""
1105 # This file is generated by cephadm.
1106 server:
1107 http_listen_port: 9080
1108 grpc_listen_port: 0
1109
1110 positions:
1111 filename: /tmp/positions.yaml
1112
1113 clients:
1114 - url: http://:3100/loki/api/v1/push
1115
1116 scrape_configs:
1117 - job_name: system
1118 static_configs:
1119 - labels:
1120 job: Cluster Logs
1121 __path__: /var/log/ceph/**/*.log""").lstrip()
1122
1123 _run_cephadm.assert_called_with(
1124 'test',
1125 "promtail.test",
1126 ['_orch', 'deploy'],
1127 [],
1128 stdin=json.dumps({
1129 "fsid": "fsid",
1130 "name": 'promtail.test',
1131 "image": '',
1132 "deploy_arguments": [],
1133 "params": {
1134 'tcp_ports': [9080],
1135 },
1136 "meta": {
1137 'service_name': 'promtail',
1138 'ports': [9080],
1139 'ip': None,
1140 'deployed_by': [],
1141 'rank': None,
1142 'rank_generation': None,
1143 'extra_container_args': None,
1144 'extra_entrypoint_args': None,
1145 },
1146 "config_blobs": {
1147 "files": {
1148 "promtail.yml": y
1149 },
1150 },
1151 }),
1152 use_current_daemon_image=False,
1153 )
1154
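# Grafana config generation: a cert/key pair is pre-seeded in cert_key_store, and the test
# checks the rendered grafana.ini, the datasource/dashboard provisioning YAML and the cert
# files included in the deploy payload for grafana.test.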
1155 @patch("cephadm.serve.CephadmServe._run_cephadm")
1156 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
1157 @patch("cephadm.services.monitoring.verify_tls", lambda *_: None)
1158 def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1159 _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
1160
1161 with with_host(cephadm_module, "test"):
1162 cephadm_module.cert_key_store.save_cert('grafana_cert', grafana_cert, host='test')
1163 cephadm_module.cert_key_store.save_key('grafana_key', grafana_key, host='test')
1164 with with_service(
1165 cephadm_module, PrometheusSpec("prometheus")
1166 ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service(
1167 cephadm_module, GrafanaSpec("grafana")
1168 ) as _:
1169 files = {
1170 'grafana.ini': dedent("""
1171 # This file is generated by cephadm.
1172 [users]
1173 default_theme = light
1174 [auth.anonymous]
1175 enabled = true
1176 org_name = 'Main Org.'
1177 org_role = 'Viewer'
1178 [server]
1179 domain = 'bootstrap.storage.lab'
1180 protocol = https
1181 cert_file = /etc/grafana/certs/cert_file
1182 cert_key = /etc/grafana/certs/cert_key
1183 http_port = 3000
1184 http_addr =
1185 [snapshots]
1186 external_enabled = false
1187 [security]
1188 disable_initial_admin_creation = true
1189 cookie_secure = true
1190 cookie_samesite = none
1191 allow_embedding = true""").lstrip(), # noqa: W291
1192 'provisioning/datasources/ceph-dashboard.yml': dedent("""
1193 # This file is generated by cephadm.
1194 apiVersion: 1
1195
1196 deleteDatasources:
1197 - name: 'Dashboard1'
1198 orgId: 1
1199
1200 datasources:
1201 - name: 'Dashboard1'
1202 type: 'prometheus'
1203 access: 'proxy'
1204 orgId: 1
1205 url: 'http://[1::4]:9095'
1206 basicAuth: false
1207 isDefault: true
1208 editable: false
1209
1210 - name: 'Loki'
1211 type: 'loki'
1212 access: 'proxy'
1213 url: ''
1214 basicAuth: false
1215 isDefault: false
1216 editable: false""").lstrip(),
1217 'certs/cert_file': dedent(f"""
1218 # generated by cephadm\n{grafana_cert}""").lstrip(),
1219 'certs/cert_key': dedent(f"""
1220 # generated by cephadm\n{grafana_key}""").lstrip(),
1221 'provisioning/dashboards/default.yml': dedent("""
1222 # This file is generated by cephadm.
1223 apiVersion: 1
1224
1225 providers:
1226 - name: 'Ceph Dashboard'
1227 orgId: 1
1228 folder: ''
1229 type: file
1230 disableDeletion: false
1231 updateIntervalSeconds: 3
1232 editable: false
1233 options:
1234 path: '/etc/grafana/provisioning/dashboards'""").lstrip(),
1235 }
1236
1237 _run_cephadm.assert_called_with(
1238 'test',
1239 "grafana.test",
1240 ['_orch', 'deploy'],
1241 [],
1242 stdin=json.dumps({
1243 "fsid": "fsid",
1244 "name": 'grafana.test',
1245 "image": '',
1246 "deploy_arguments": [],
1247 "params": {
1248 'tcp_ports': [3000],
1249 },
1250 "meta": {
1251 'service_name': 'grafana',
1252 'ports': [3000],
1253 'ip': None,
1254 'deployed_by': [],
1255 'rank': None,
1256 'rank_generation': None,
1257 'extra_container_args': None,
1258 'extra_entrypoint_args': None,
1259 },
1260 "config_blobs": {
1261 "files": files,
1262 },
1263 }),
1264 use_current_daemon_image=False,
1265 )
1266
1267 @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1268 def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
1269 with with_host(cephadm_module, 'test'):
1270 with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
1271 with_service(cephadm_module, GrafanaSpec(initial_admin_password='secure')):
1272 out = cephadm_module.cephadm_services['grafana'].generate_config(
1273 CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
1274 assert out == (
1275 {
1276 'files':
1277 {
1278 'grafana.ini':
1279 '# This file is generated by cephadm.\n'
1280 '[users]\n'
1281 ' default_theme = light\n'
1282 '[auth.anonymous]\n'
1283 ' enabled = true\n'
1284 " org_name = 'Main Org.'\n"
1285 " org_role = 'Viewer'\n"
1286 '[server]\n'
1287 " domain = 'bootstrap.storage.lab'\n"
1288 ' protocol = https\n'
1289 ' cert_file = /etc/grafana/certs/cert_file\n'
1290 ' cert_key = /etc/grafana/certs/cert_key\n'
1291 ' http_port = 3000\n'
1292 ' http_addr = \n'
1293 '[snapshots]\n'
1294 ' external_enabled = false\n'
1295 '[security]\n'
1296 ' admin_user = admin\n'
1297 ' admin_password = secure\n'
1298 ' cookie_secure = true\n'
1299 ' cookie_samesite = none\n'
1300 ' allow_embedding = true',
1301 'provisioning/datasources/ceph-dashboard.yml':
1302 "# This file is generated by cephadm.\n"
1303 "apiVersion: 1\n\n"
1304 'deleteDatasources:\n\n'
1305 'datasources:\n\n'
1306 " - name: 'Loki'\n"
1307 " type: 'loki'\n"
1308 " access: 'proxy'\n"
1309 " url: ''\n"
1310 ' basicAuth: false\n'
1311 ' isDefault: false\n'
1312 ' editable: false',
1313 'certs/cert_file': ANY,
1314 'certs/cert_key': ANY,
1315 'provisioning/dashboards/default.yml':
1316 '# This file is generated by cephadm.\n'
1317 'apiVersion: 1\n\n'
1318 'providers:\n'
1319 " - name: 'Ceph Dashboard'\n"
1320 ' orgId: 1\n'
1321 " folder: ''\n"
1322 ' type: file\n'
1323 ' disableDeletion: false\n'
1324 ' updateIntervalSeconds: 3\n'
1325 ' editable: false\n'
1326 ' options:\n'
1327 " path: '/etc/grafana/provisioning/dashboards'"
1328 }}, ['secure_monitoring_stack:False'])
1329
1330 @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1331 def test_grafana_no_anon_access(self, cephadm_module: CephadmOrchestrator):
1332 # With anonymous_access set to False, the [auth.anonymous] section should not be
1333 # present in the grafana config. Note that an initial_admin_password must be
1334 # provided when anonymous_access is False.
1335 with with_host(cephadm_module, 'test'):
1336 with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
1337 with_service(cephadm_module, GrafanaSpec(anonymous_access=False, initial_admin_password='secure')):
1338 out = cephadm_module.cephadm_services['grafana'].generate_config(
1339 CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
1340 assert out == (
1341 {
1342 'files':
1343 {
1344 'grafana.ini':
1345 '# This file is generated by cephadm.\n'
1346 '[users]\n'
1347 ' default_theme = light\n'
1348 '[server]\n'
1349 " domain = 'bootstrap.storage.lab'\n"
1350 ' protocol = https\n'
1351 ' cert_file = /etc/grafana/certs/cert_file\n'
1352 ' cert_key = /etc/grafana/certs/cert_key\n'
1353 ' http_port = 3000\n'
1354 ' http_addr = \n'
1355 '[snapshots]\n'
1356 ' external_enabled = false\n'
1357 '[security]\n'
1358 ' admin_user = admin\n'
1359 ' admin_password = secure\n'
1360 ' cookie_secure = true\n'
1361 ' cookie_samesite = none\n'
1362 ' allow_embedding = true',
1363 'provisioning/datasources/ceph-dashboard.yml':
1364 "# This file is generated by cephadm.\n"
1365 "apiVersion: 1\n\n"
1366 'deleteDatasources:\n\n'
1367 'datasources:\n\n'
1368 " - name: 'Loki'\n"
1369 " type: 'loki'\n"
1370 " access: 'proxy'\n"
1371 " url: ''\n"
1372 ' basicAuth: false\n'
1373 ' isDefault: false\n'
1374 ' editable: false',
1375 'certs/cert_file': ANY,
1376 'certs/cert_key': ANY,
1377 'provisioning/dashboards/default.yml':
1378 '# This file is generated by cephadm.\n'
1379 'apiVersion: 1\n\n'
1380 'providers:\n'
1381 " - name: 'Ceph Dashboard'\n"
1382 ' orgId: 1\n'
1383 " folder: ''\n"
1384 ' type: file\n'
1385 ' disableDeletion: false\n'
1386 ' updateIntervalSeconds: 3\n'
1387 ' editable: false\n'
1388 ' options:\n'
1389 " path: '/etc/grafana/provisioning/dashboards'"
1390 }}, ['secure_monitoring_stack:False'])
1391
1392 @patch("cephadm.serve.CephadmServe._run_cephadm")
1393 def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1394 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1395
1396 with with_host(cephadm_module, 'test'):
1397
1398 yaml_str = """service_type: alertmanager
1399 service_name: alertmanager
1400 placement:
1401 count: 1
1402 spec:
1403 port: 4200
1404 """
1405 yaml_file = yaml.safe_load(yaml_str)
1406 spec = ServiceSpec.from_json(yaml_file)
1407
1408 with patch("cephadm.services.monitoring.AlertmanagerService.generate_config", return_value=({}, [])):
1409 with with_service(cephadm_module, spec):
1410
1411 CephadmServe(cephadm_module)._check_daemons()
1412
1413 _run_cephadm.assert_called_with(
1414 'test',
1415 "alertmanager.test",
1416 ['_orch', 'deploy'],
1417 [],
1418 stdin=json.dumps({
1419 "fsid": "fsid",
1420 "name": 'alertmanager.test',
1421 "image": '',
1422 "deploy_arguments": [],
1423 "params": {
1424 'tcp_ports': [4200, 9094],
1425 'reconfig': True,
1426 },
1427 "meta": {
1428 'service_name': 'alertmanager',
1429 'ports': [4200, 9094],
1430 'ip': None,
1431 'deployed_by': [],
1432 'rank': None,
1433 'rank_generation': None,
1434 'extra_container_args': None,
1435 'extra_entrypoint_args': None,
1436 },
1437 "config_blobs": {},
1438 }),
1439 use_current_daemon_image=True,
1440 )
1441
1442
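# RgwService should translate the spec (frontend type, ssl flag, rgw_frontend_extra_args)
# plus the host's network into the rgw_frontends config option, for both beast and civetweb
# on an IPv6 network.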
1443 class TestRGWService:
1444
1445 @pytest.mark.parametrize(
1446 "frontend, ssl, extra_args, expected",
1447 [
1448 ('beast', False, ['tcp_nodelay=1'],
1449 'beast endpoint=[fd00:fd00:fd00:3000::1]:80 tcp_nodelay=1'),
1450 ('beast', True, ['tcp_nodelay=0', 'max_header_size=65536'],
1451 'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo tcp_nodelay=0 max_header_size=65536'),
1452 ('civetweb', False, [], 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
1453 ('civetweb', True, None,
1454 'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'),
1455 ]
1456 )
1457 @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1458 def test_rgw_update(self, frontend, ssl, extra_args, expected, cephadm_module: CephadmOrchestrator):
1459 with with_host(cephadm_module, 'host1'):
1460 cephadm_module.cache.update_host_networks('host1', {
1461 'fd00:fd00:fd00:3000::/64': {
1462 'if0': ['fd00:fd00:fd00:3000::1']
1463 }
1464 })
1465 s = RGWSpec(service_id="foo",
1466 networks=['fd00:fd00:fd00:3000::/64'],
1467 ssl=ssl,
1468 rgw_frontend_type=frontend,
1469 rgw_frontend_extra_args=extra_args)
1470 with with_service(cephadm_module, s) as dds:
1471 _, f, _ = cephadm_module.check_mon_command({
1472 'prefix': 'config get',
1473 'who': f'client.{dds[0]}',
1474 'key': 'rgw_frontends',
1475 })
1476 assert f == expected
1477
1478
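# MonService.set_crush_locations: given a mon spec with per-host crush_locations and the
# blank-location quorum_status returned by FakeMgr, every mon should get a
# 'mon set_location' call, recorded in FakeMgr.set_mon_crush_locations.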
1479 class TestMonService:
1480
1481 def test_set_crush_locations(self, cephadm_module: CephadmOrchestrator):
1482 mgr = FakeMgr()
1483 mon_service = MonService(mgr)
1484 mon_spec = ServiceSpec(service_type='mon', crush_locations={'vm-00': ['datacenter=a', 'rack=1'], 'vm-01': ['datacenter=a'], 'vm-02': ['datacenter=b', 'rack=3']})
1485
1486 mon_daemons = [
1487 DaemonDescription(daemon_type='mon', daemon_id='vm-00', hostname='vm-00'),
1488 DaemonDescription(daemon_type='mon', daemon_id='vm-01', hostname='vm-01'),
1489 DaemonDescription(daemon_type='mon', daemon_id='vm-02', hostname='vm-02')
1490 ]
1491 mon_service.set_crush_locations(mon_daemons, mon_spec)
1492 assert 'vm-00' in mgr.set_mon_crush_locations
1493 assert mgr.set_mon_crush_locations['vm-00'] == ['datacenter=a', 'rack=1']
1494 assert 'vm-01' in mgr.set_mon_crush_locations
1495 assert mgr.set_mon_crush_locations['vm-01'] == ['datacenter=a']
1496 assert 'vm-02' in mgr.set_mon_crush_locations
1497 assert mgr.set_mon_crush_locations['vm-02'] == ['datacenter=b', 'rack=3']
1498
1499
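# snmp-gateway deployments: V2c (default and custom port), V3 without privacy and V3 with
# privacy. In each case the expected credentials/engine id end up in config_blobs and the
# gateway port in tcp_ports.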
1500 class TestSNMPGateway:
1501
1502 @patch("cephadm.serve.CephadmServe._run_cephadm")
1503 def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1504 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1505
1506 spec = SNMPGatewaySpec(
1507 snmp_version='V2c',
1508 snmp_destination='192.168.1.1:162',
1509 credentials={
1510 'snmp_community': 'public'
1511 })
1512
1513 config = {
1514 "destination": spec.snmp_destination,
1515 "snmp_version": spec.snmp_version,
1516 "snmp_community": spec.credentials.get('snmp_community')
1517 }
1518
1519 with with_host(cephadm_module, 'test'):
1520 with with_service(cephadm_module, spec):
1521 _run_cephadm.assert_called_with(
1522 'test',
1523 "snmp-gateway.test",
1524 ['_orch', 'deploy'],
1525 [],
1526 stdin=json.dumps({
1527 "fsid": "fsid",
1528 "name": 'snmp-gateway.test',
1529 "image": '',
1530 "deploy_arguments": [],
1531 "params": {
1532 'tcp_ports': [9464],
1533 },
1534 "meta": {
1535 'service_name': 'snmp-gateway',
1536 'ports': [9464],
1537 'ip': None,
1538 'deployed_by': [],
1539 'rank': None,
1540 'rank_generation': None,
1541 'extra_container_args': None,
1542 'extra_entrypoint_args': None,
1543 },
1544 "config_blobs": config,
1545 }),
1546 use_current_daemon_image=False,
1547 )
1548
1549 @patch("cephadm.serve.CephadmServe._run_cephadm")
1550 def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1551 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1552
1553 spec = SNMPGatewaySpec(
1554 snmp_version='V2c',
1555 snmp_destination='192.168.1.1:162',
1556 credentials={
1557 'snmp_community': 'public'
1558 },
1559 port=9465)
1560
1561 config = {
1562 "destination": spec.snmp_destination,
1563 "snmp_version": spec.snmp_version,
1564 "snmp_community": spec.credentials.get('snmp_community')
1565 }
1566
1567 with with_host(cephadm_module, 'test'):
1568 with with_service(cephadm_module, spec):
1569 _run_cephadm.assert_called_with(
1570 'test',
1571 "snmp-gateway.test",
1572 ['_orch', 'deploy'],
1573 [],
1574 stdin=json.dumps({
1575 "fsid": "fsid",
1576 "name": 'snmp-gateway.test',
1577 "image": '',
1578 "deploy_arguments": [],
1579 "params": {
1580 'tcp_ports': [9465],
1581 },
1582 "meta": {
1583 'service_name': 'snmp-gateway',
1584 'ports': [9465],
1585 'ip': None,
1586 'deployed_by': [],
1587 'rank': None,
1588 'rank_generation': None,
1589 'extra_container_args': None,
1590 'extra_entrypoint_args': None,
1591 },
1592 "config_blobs": config,
1593 }),
1594 use_current_daemon_image=False,
1595 )
1596
1597 @patch("cephadm.serve.CephadmServe._run_cephadm")
1598 def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1599 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1600
1601 spec = SNMPGatewaySpec(
1602 snmp_version='V3',
1603 snmp_destination='192.168.1.1:162',
1604 engine_id='8000C53F00000000',
1605 credentials={
1606 'snmp_v3_auth_username': 'myuser',
1607 'snmp_v3_auth_password': 'mypassword'
1608 })
1609
1610 config = {
1611 'destination': spec.snmp_destination,
1612 'snmp_version': spec.snmp_version,
1613 'snmp_v3_auth_protocol': 'SHA',
1614 'snmp_v3_auth_username': 'myuser',
1615 'snmp_v3_auth_password': 'mypassword',
1616 'snmp_v3_engine_id': '8000C53F00000000'
1617 }
1618
1619 with with_host(cephadm_module, 'test'):
1620 with with_service(cephadm_module, spec):
1621 _run_cephadm.assert_called_with(
1622 'test',
1623 "snmp-gateway.test",
1624 ['_orch', 'deploy'],
1625 [],
1626 stdin=json.dumps({
1627 "fsid": "fsid",
1628 "name": 'snmp-gateway.test',
1629 "image": '',
1630 "deploy_arguments": [],
1631 "params": {
1632 'tcp_ports': [9464],
1633 },
1634 "meta": {
1635 'service_name': 'snmp-gateway',
1636 'ports': [9464],
1637 'ip': None,
1638 'deployed_by': [],
1639 'rank': None,
1640 'rank_generation': None,
1641 'extra_container_args': None,
1642 'extra_entrypoint_args': None,
1643 },
1644 "config_blobs": config,
1645 }),
1646 use_current_daemon_image=False,
1647 )
1648
1649 @patch("cephadm.serve.CephadmServe._run_cephadm")
1650 def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1651 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1652
1653 spec = SNMPGatewaySpec(
1654 snmp_version='V3',
1655 snmp_destination='192.168.1.1:162',
1656 engine_id='8000C53F00000000',
1657 auth_protocol='MD5',
1658 privacy_protocol='AES',
1659 credentials={
1660 'snmp_v3_auth_username': 'myuser',
1661 'snmp_v3_auth_password': 'mypassword',
1662 'snmp_v3_priv_password': 'mysecret',
1663 })
1664
1665 config = {
1666 'destination': spec.snmp_destination,
1667 'snmp_version': spec.snmp_version,
1668 'snmp_v3_auth_protocol': 'MD5',
1669 'snmp_v3_auth_username': spec.credentials.get('snmp_v3_auth_username'),
1670 'snmp_v3_auth_password': spec.credentials.get('snmp_v3_auth_password'),
1671 'snmp_v3_engine_id': '8000C53F00000000',
1672 'snmp_v3_priv_protocol': spec.privacy_protocol,
1673 'snmp_v3_priv_password': spec.credentials.get('snmp_v3_priv_password'),
1674 }
1675
1676 with with_host(cephadm_module, 'test'):
1677 with with_service(cephadm_module, spec):
1678 _run_cephadm.assert_called_with(
1679 'test',
1680 "snmp-gateway.test",
1681 ['_orch', 'deploy'],
1682 [],
1683 stdin=json.dumps({
1684 "fsid": "fsid",
1685 "name": 'snmp-gateway.test',
1686 "image": '',
1687 "deploy_arguments": [],
1688 "params": {
1689 'tcp_ports': [9464],
1690 },
1691 "meta": {
1692 'service_name': 'snmp-gateway',
1693 'ports': [9464],
1694 'ip': None,
1695 'deployed_by': [],
1696 'rank': None,
1697 'rank_generation': None,
1698 'extra_container_args': None,
1699 'extra_entrypoint_args': None,
1700 },
1701 "config_blobs": config,
1702 }),
1703 use_current_daemon_image=False,
1704 )
1705
1706
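# Ingress (haproxy) config generation: the test below builds an ingress over an NFS service
# in which two nfs daemons share rank 0 and checks that only the daemon with the higher
# rank_generation (the one on host1) is emitted as a backend server, with and without the
# haproxy PROXY protocol (enable_haproxy_protocol).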
1707 class TestIngressService:
1708
1709 @pytest.mark.parametrize(
1710 "enable_haproxy_protocol",
1711 [False, True],
1712 )
1713 @patch("cephadm.inventory.Inventory.get_addr")
1714 @patch("cephadm.utils.resolve_ip")
1715 @patch("cephadm.inventory.HostCache.get_daemons_by_service")
1716 @patch("cephadm.serve.CephadmServe._run_cephadm")
1717 def test_ingress_config_nfs_multiple_nfs_same_rank(
1718 self,
1719 _run_cephadm,
1720 _get_daemons_by_service,
1721 _resolve_ip, _get_addr,
1722 cephadm_module: CephadmOrchestrator,
1723 enable_haproxy_protocol: bool,
1724 ):
1725 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1726
1727 def fake_resolve_ip(hostname: str) -> str:
1728 if hostname == 'host1':
1729 return '192.168.122.111'
1730 elif hostname == 'host2':
1731 return '192.168.122.222'
1732 else:
1733 return 'xxx.xxx.xxx.xxx'
1734 _resolve_ip.side_effect = fake_resolve_ip
1735
1736 def fake_get_addr(hostname: str) -> str:
1737 return hostname
1738 _get_addr.side_effect = fake_get_addr
1739
1740 nfs_service = NFSServiceSpec(
1741 service_id="foo",
1742 placement=PlacementSpec(
1743 count=1,
1744 hosts=['host1', 'host2']),
1745 port=12049,
1746 enable_haproxy_protocol=enable_haproxy_protocol,
1747 )
1748
1749 ispec = IngressSpec(
1750 service_type='ingress',
1751 service_id='nfs.foo',
1752 backend_service='nfs.foo',
1753 frontend_port=2049,
1754 monitor_port=9049,
1755 virtual_ip='192.168.122.100/24',
1756 monitor_user='admin',
1757 monitor_password='12345',
1758 keepalived_password='12345',
1759 enable_haproxy_protocol=enable_haproxy_protocol,
1760 )
1761
1762 cephadm_module.spec_store._specs = {
1763 'nfs.foo': nfs_service,
1764 'ingress.nfs.foo': ispec
1765 }
1766 cephadm_module.spec_store.spec_created = {
1767 'nfs.foo': datetime_now(),
1768 'ingress.nfs.foo': datetime_now()
1769 }
1770
1771 # in both orderings tested below we only want the IP of the
1772 # host1 nfs daemon, since we will give that daemon a higher
1773 # rank_generation than the daemon on host2 while both
1774 # share rank 0
1775 haproxy_txt = (
1776 '# This file is generated by cephadm.\n'
1777 'global\n'
1778 ' log 127.0.0.1 local2\n'
1779 ' chroot /var/lib/haproxy\n'
1780 ' pidfile /var/lib/haproxy/haproxy.pid\n'
1781 ' maxconn 8000\n'
1782 ' daemon\n'
1783 ' stats socket /var/lib/haproxy/stats\n\n'
1784 'defaults\n'
1785 ' mode tcp\n'
1786 ' log global\n'
1787 ' timeout queue 1m\n'
1788 ' timeout connect 10s\n'
1789 ' timeout client 1m\n'
1790 ' timeout server 1m\n'
1791 ' timeout check 10s\n'
1792 ' maxconn 8000\n\n'
1793 'frontend stats\n'
1794 ' mode http\n'
1795 ' bind 192.168.122.100:9049\n'
1796 ' bind host1:9049\n'
1797 ' stats enable\n'
1798 ' stats uri /stats\n'
1799 ' stats refresh 10s\n'
1800 ' stats auth admin:12345\n'
1801 ' http-request use-service prometheus-exporter if { path /metrics }\n'
1802 ' monitor-uri /health\n\n'
1803 'frontend frontend\n'
1804 ' bind 192.168.122.100:2049\n'
1805 ' default_backend backend\n\n'
1806 'backend backend\n'
1807 ' mode tcp\n'
1808 ' balance source\n'
1809 ' hash-type consistent\n'
1810 )
1811 if enable_haproxy_protocol:
1812 haproxy_txt += ' default-server send-proxy-v2\n'
1813 haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049 check\n'
1814 haproxy_expected_conf = {
1815 'files': {'haproxy.cfg': haproxy_txt}
1816 }
1817
1818 # verify we get the same cfg regardless of the order in which the nfs daemons are returned.
1819 # both nfs daemons are rank 0, so only the one with rank_generation 1, i.e. the one
1820 # on host1, should be picked up
1821 nfs_daemons = [
1822 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049]),
1823 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049])
1824 ]
1825 _get_daemons_by_service.return_value = nfs_daemons
1826
1827 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1828 CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))
1829
1830 assert haproxy_generated_conf[0] == haproxy_expected_conf
1831
1832 # swap the order now; it should still pick out the daemon with the higher rank_generation.
1833 # both nfs daemons are rank 0, so only the one with rank_generation 1, i.e. the one
1834 # on host1, should be picked up
1835 nfs_daemons = [
1836 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049]),
1837 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049])
1838 ]
1839 _get_daemons_by_service.return_value = nfs_daemons
1840
1841 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1842 CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))
1843
1844 assert haproxy_generated_conf[0] == haproxy_expected_conf
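
    # Illustrative sketch, not part of the upstream suite: the behaviour verified above,
    # where several nfs daemons share a rank and only the one with the highest
    # rank_generation ends up in the haproxy backend, boils down to a selection step
    # along these lines (the helper name is made up for illustration):
    @staticmethod
    def _newest_daemon_per_rank(daemons: List[DaemonDescription]) -> List[DaemonDescription]:
        newest: Dict[int, DaemonDescription] = {}
        for d in daemons:
            if d.rank is None or d.rank_generation is None:
                continue  # unranked daemons are not considered here
            best = newest.get(d.rank)
            if best is None or d.rank_generation > best.rank_generation:
                newest[d.rank] = d
        return [newest[r] for r in sorted(newest)]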
1845
1846 @patch("cephadm.serve.CephadmServe._run_cephadm")
1847 def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1848 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1849
1850 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
1851 cephadm_module.cache.update_host_networks('test', {
1852 '1.2.3.0/24': {
1853 'if0': ['1.2.3.4']
1854 }
1855 })
1856
1857 # the ingress backend
1858 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
1859 rgw_frontend_type='beast')
1860
1861 ispec = IngressSpec(service_type='ingress',
1862 service_id='test',
1863 backend_service='rgw.foo',
1864 frontend_port=8089,
1865 monitor_port=8999,
1866 monitor_user='admin',
1867 monitor_password='12345',
1868 keepalived_password='12345',
1869 virtual_interface_networks=['1.2.3.0/24'],
1870 virtual_ip="1.2.3.4/32")
1871 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
1872 # generate the keepalived conf based on the specified spec
1873 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
1874 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1875
1876 keepalived_expected_conf = {
1877 'files':
1878 {
1879 'keepalived.conf':
1880 '# This file is generated by cephadm.\n'
1881 'global_defs {\n '
1882 'enable_script_security\n '
1883 'script_user root\n'
1884 '}\n\n'
1885 'vrrp_script check_backend {\n '
1886 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
1887 'weight -20\n '
1888 'interval 2\n '
1889 'rise 2\n '
1890 'fall 2\n}\n\n'
1891 'vrrp_instance VI_0 {\n '
1892 'state MASTER\n '
1893 'priority 100\n '
1894 'interface if0\n '
1895 'virtual_router_id 50\n '
1896 'advert_int 1\n '
1897 'authentication {\n '
1898 'auth_type PASS\n '
1899 'auth_pass 12345\n '
1900 '}\n '
1901 'unicast_src_ip 1.2.3.4\n '
1902 'unicast_peer {\n '
1903 '}\n '
1904 'virtual_ipaddress {\n '
1905 '1.2.3.4/32 dev if0\n '
1906 '}\n '
1907 'track_script {\n '
1908 'check_backend\n }\n'
1909 '}\n'
1910 }
1911 }
1912
1913 # check keepalived config
1914 assert keepalived_generated_conf[0] == keepalived_expected_conf
1915
1916 # generate the haproxy conf based on the specified spec
1917 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1918 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
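# haproxy_generate_config() returns a (config, deps) tuple: index 0 is the config blob
# compared below, and index 1 is the list of backend daemon names, which is why the
# expected cfg splices haproxy_generated_conf[1][0] (the rgw daemon name) into its
# 'server ...' line.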
1919
1920 haproxy_expected_conf = {
1921 'files':
1922 {
1923 'haproxy.cfg':
1924 '# This file is generated by cephadm.'
1925 '\nglobal\n log '
1926 '127.0.0.1 local2\n '
1927 'chroot /var/lib/haproxy\n '
1928 'pidfile /var/lib/haproxy/haproxy.pid\n '
1929 'maxconn 8000\n '
1930 'daemon\n '
1931 'stats socket /var/lib/haproxy/stats\n'
1932 '\ndefaults\n '
1933 'mode http\n '
1934 'log global\n '
1935 'option httplog\n '
1936 'option dontlognull\n '
1937 'option http-server-close\n '
1938 'option forwardfor except 127.0.0.0/8\n '
1939 'option redispatch\n '
1940 'retries 3\n '
1941 'timeout queue 20s\n '
1942 'timeout connect 5s\n '
1943 'timeout http-request 1s\n '
1944 'timeout http-keep-alive 5s\n '
1945 'timeout client 30s\n '
1946 'timeout server 30s\n '
1947 'timeout check 5s\n '
1948 'maxconn 8000\n'
1949 '\nfrontend stats\n '
1950 'mode http\n '
1951 'bind 1.2.3.4:8999\n '
1952 'bind 1.2.3.7:8999\n '
1953 'stats enable\n '
1954 'stats uri /stats\n '
1955 'stats refresh 10s\n '
1956 'stats auth admin:12345\n '
1957 'http-request use-service prometheus-exporter if { path /metrics }\n '
1958 'monitor-uri /health\n'
1959 '\nfrontend frontend\n '
1960 'bind 1.2.3.4:8089\n '
1961 'default_backend backend\n\n'
1962 'backend backend\n '
1963 'option forwardfor\n '
1964 'balance static-rr\n '
1965 'option httpchk HEAD / HTTP/1.0\n '
1966 'server '
1967 + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100 inter 2s\n'
1968 }
1969 }
1970
1971 assert haproxy_generated_conf[0] == haproxy_expected_conf
1972
1973 @patch("cephadm.serve.CephadmServe._run_cephadm")
1974 def test_ingress_config_ssl_rgw(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1975 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1976
1977 with with_host(cephadm_module, 'test'):
1978 cephadm_module.cache.update_host_networks('test', {
1979 '1.2.3.0/24': {
1980 'if0': ['1.2.3.1']
1981 }
1982 })
1983
1984 # the ingress backend
1985 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
1986 rgw_frontend_type='beast', rgw_frontend_port=443, ssl=True)
1987
1988 ispec = IngressSpec(service_type='ingress',
1989 service_id='test',
1990 backend_service='rgw.foo',
1991 frontend_port=8089,
1992 monitor_port=8999,
1993 monitor_user='admin',
1994 monitor_password='12345',
1995 keepalived_password='12345',
1996 virtual_interface_networks=['1.2.3.0/24'],
1997 virtual_ip="1.2.3.4/32")
1998 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
1999 # generate the keepalived conf based on the specified spec
2000 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
2001 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
2002
2003 keepalived_expected_conf = {
2004 'files':
2005 {
2006 'keepalived.conf':
2007 '# This file is generated by cephadm.\n'
2008 'global_defs {\n '
2009 'enable_script_security\n '
2010 'script_user root\n'
2011 '}\n\n'
2012 'vrrp_script check_backend {\n '
2013 'script "/usr/bin/curl http://[1::4]:8999/health"\n '
2014 'weight -20\n '
2015 'interval 2\n '
2016 'rise 2\n '
2017 'fall 2\n}\n\n'
2018 'vrrp_instance VI_0 {\n '
2019 'state MASTER\n '
2020 'priority 100\n '
2021 'interface if0\n '
2022 'virtual_router_id 50\n '
2023 'advert_int 1\n '
2024 'authentication {\n '
2025 'auth_type PASS\n '
2026 'auth_pass 12345\n '
2027 '}\n '
2028 'unicast_src_ip 1.2.3.1\n '
2029 'unicast_peer {\n '
2030 '}\n '
2031 'virtual_ipaddress {\n '
2032 '1.2.3.4/32 dev if0\n '
2033 '}\n '
2034 'track_script {\n '
2035 'check_backend\n }\n'
2036 '}\n'
2037 }
2038 }
2039
2040 # check keepalived config
2041 assert keepalived_generated_conf[0] == keepalived_expected_conf
2042
2043 # generate the haproxy conf based on the specified spec
2044 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
2045 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
2046
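# compared with the plain-http variant in test_ingress_config above, the backend
# section gains 'default-server ssl' and 'default-server verify none', and the
# server line points at the rgw SSL port (443) instead of 80.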
2047 haproxy_expected_conf = {
2048 'files':
2049 {
2050 'haproxy.cfg':
2051 '# This file is generated by cephadm.'
2052 '\nglobal\n log '
2053 '127.0.0.1 local2\n '
2054 'chroot /var/lib/haproxy\n '
2055 'pidfile /var/lib/haproxy/haproxy.pid\n '
2056 'maxconn 8000\n '
2057 'daemon\n '
2058 'stats socket /var/lib/haproxy/stats\n'
2059 '\ndefaults\n '
2060 'mode http\n '
2061 'log global\n '
2062 'option httplog\n '
2063 'option dontlognull\n '
2064 'option http-server-close\n '
2065 'option forwardfor except 127.0.0.0/8\n '
2066 'option redispatch\n '
2067 'retries 3\n '
2068 'timeout queue 20s\n '
2069 'timeout connect 5s\n '
2070 'timeout http-request 1s\n '
2071 'timeout http-keep-alive 5s\n '
2072 'timeout client 30s\n '
2073 'timeout server 30s\n '
2074 'timeout check 5s\n '
2075 'maxconn 8000\n'
2076 '\nfrontend stats\n '
2077 'mode http\n '
2078 'bind 1.2.3.4:8999\n '
2079 'bind 1::4:8999\n '
2080 'stats enable\n '
2081 'stats uri /stats\n '
2082 'stats refresh 10s\n '
2083 'stats auth admin:12345\n '
2084 'http-request use-service prometheus-exporter if { path /metrics }\n '
2085 'monitor-uri /health\n'
2086 '\nfrontend frontend\n '
2087 'bind 1.2.3.4:8089\n '
2088 'default_backend backend\n\n'
2089 'backend backend\n '
2090 'option forwardfor\n '
2091 'default-server ssl\n '
2092 'default-server verify none\n '
2093 'balance static-rr\n '
2094 'option httpchk HEAD / HTTP/1.0\n '
2095 'server '
2096 + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100 inter 2s\n'
2097 }
2098 }
2099
2100 assert haproxy_generated_conf[0] == haproxy_expected_conf
2101
2102 @patch("cephadm.serve.CephadmServe._run_cephadm")
2103 def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2104 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2105
2106 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
2107 cephadm_module.cache.update_host_networks('test', {
2108 '1.2.3.0/24': {
2109 'if0': ['1.2.3.1']
2110 }
2111 })
2112
2113 # Check the ingress with multiple VIPs
2114 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
2115 rgw_frontend_type='beast')
2116
2117 ispec = IngressSpec(service_type='ingress',
2118 service_id='test',
2119 backend_service='rgw.foo',
2120 frontend_port=8089,
2121 monitor_port=8999,
2122 monitor_user='admin',
2123 monitor_password='12345',
2124 keepalived_password='12345',
2125 virtual_interface_networks=['1.2.3.0/24'],
2126 virtual_ips_list=["1.2.3.4/32"])
2127 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
2128 # generate the keepalived conf based on the specified spec
2129 # Test with only one IP in the list, as it would fail with more VIPs but only one host.
2130 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
2131 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
2132
2133 keepalived_expected_conf = {
2134 'files':
2135 {
2136 'keepalived.conf':
2137 '# This file is generated by cephadm.\n'
2138 'global_defs {\n '
2139 'enable_script_security\n '
2140 'script_user root\n'
2141 '}\n\n'
2142 'vrrp_script check_backend {\n '
2143 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
2144 'weight -20\n '
2145 'interval 2\n '
2146 'rise 2\n '
2147 'fall 2\n}\n\n'
2148 'vrrp_instance VI_0 {\n '
2149 'state MASTER\n '
2150 'priority 100\n '
2151 'interface if0\n '
2152 'virtual_router_id 50\n '
2153 'advert_int 1\n '
2154 'authentication {\n '
2155 'auth_type PASS\n '
2156 'auth_pass 12345\n '
2157 '}\n '
2158 'unicast_src_ip 1.2.3.1\n '
2159 'unicast_peer {\n '
2160 '}\n '
2161 'virtual_ipaddress {\n '
2162 '1.2.3.4/32 dev if0\n '
2163 '}\n '
2164 'track_script {\n '
2165 'check_backend\n }\n'
2166 '}\n'
2167 }
2168 }
2169
2170 # check keepalived config
2171 assert keepalived_generated_conf[0] == keepalived_expected_conf
2172
2173 # generate the haproxy conf based on the specified spec
2174 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
2175 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
2176
2177 haproxy_expected_conf = {
2178 'files':
2179 {
2180 'haproxy.cfg':
2181 '# This file is generated by cephadm.'
2182 '\nglobal\n log '
2183 '127.0.0.1 local2\n '
2184 'chroot /var/lib/haproxy\n '
2185 'pidfile /var/lib/haproxy/haproxy.pid\n '
2186 'maxconn 8000\n '
2187 'daemon\n '
2188 'stats socket /var/lib/haproxy/stats\n'
2189 '\ndefaults\n '
2190 'mode http\n '
2191 'log global\n '
2192 'option httplog\n '
2193 'option dontlognull\n '
2194 'option http-server-close\n '
2195 'option forwardfor except 127.0.0.0/8\n '
2196 'option redispatch\n '
2197 'retries 3\n '
2198 'timeout queue 20s\n '
2199 'timeout connect 5s\n '
2200 'timeout http-request 1s\n '
2201 'timeout http-keep-alive 5s\n '
2202 'timeout client 30s\n '
2203 'timeout server 30s\n '
2204 'timeout check 5s\n '
2205 'maxconn 8000\n'
2206 '\nfrontend stats\n '
2207 'mode http\n '
2208 'bind [..]:8999\n '
2209 'bind 1.2.3.7:8999\n '
2210 'stats enable\n '
2211 'stats uri /stats\n '
2212 'stats refresh 10s\n '
2213 'stats auth admin:12345\n '
2214 'http-request use-service prometheus-exporter if { path /metrics }\n '
2215 'monitor-uri /health\n'
2216 '\nfrontend frontend\n '
2217 'bind [..]:8089\n '
2218 'default_backend backend\n\n'
2219 'backend backend\n '
2220 'option forwardfor\n '
2221 'balance static-rr\n '
2222 'option httpchk HEAD / HTTP/1.0\n '
2223 'server '
2224 + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100 inter 2s\n'
2225 }
2226 }
2227
2228 assert haproxy_generated_conf[0] == haproxy_expected_conf
2229
2230 @patch("cephadm.serve.CephadmServe._run_cephadm")
2231 def test_keepalive_config_multi_interface_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2232 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2233
2234 with with_host(cephadm_module, 'test', addr='1.2.3.1'):
2235 with with_host(cephadm_module, 'test2', addr='1.2.3.2'):
2236 cephadm_module.cache.update_host_networks('test', {
2237 '1.2.3.0/24': {
2238 'if0': ['1.2.3.1']
2239 },
2240 '100.100.100.0/24': {
2241 'if1': ['100.100.100.1']
2242 }
2243 })
2244 cephadm_module.cache.update_host_networks('test2', {
2245 '1.2.3.0/24': {
2246 'if0': ['1.2.3.2']
2247 },
2248 '100.100.100.0/24': {
2249 'if1': ['100.100.100.2']
2250 }
2251 })
2252
2253 # Check the ingress with multiple VIPs
2254 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
2255 rgw_frontend_type='beast')
2256
2257 ispec = IngressSpec(service_type='ingress',
2258 service_id='test',
2259 placement=PlacementSpec(hosts=['test', 'test2']),
2260 backend_service='rgw.foo',
2261 frontend_port=8089,
2262 monitor_port=8999,
2263 monitor_user='admin',
2264 monitor_password='12345',
2265 keepalived_password='12345',
2266 virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"])
2267 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
2268 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
2269 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
2270
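# with two VIPs on two different networks we expect two vrrp_instance blocks, one per
# VIP/interface: this host ('test') is MASTER (priority 100) for the first VIP and
# BACKUP (priority 90) for the second, the virtual_router_ids count up from 50, and
# each unicast_peer is the other host's address on the matching network.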
2271 keepalived_expected_conf = {
2272 'files':
2273 {
2274 'keepalived.conf':
2275 '# This file is generated by cephadm.\n'
2276 'global_defs {\n '
2277 'enable_script_security\n '
2278 'script_user root\n'
2279 '}\n\n'
2280 'vrrp_script check_backend {\n '
2281 'script "/usr/bin/curl http://1.2.3.1:8999/health"\n '
2282 'weight -20\n '
2283 'interval 2\n '
2284 'rise 2\n '
2285 'fall 2\n}\n\n'
2286 'vrrp_instance VI_0 {\n '
2287 'state MASTER\n '
2288 'priority 100\n '
2289 'interface if0\n '
2290 'virtual_router_id 50\n '
2291 'advert_int 1\n '
2292 'authentication {\n '
2293 'auth_type PASS\n '
2294 'auth_pass 12345\n '
2295 '}\n '
2296 'unicast_src_ip 1.2.3.1\n '
2297 'unicast_peer {\n '
2298 '1.2.3.2\n '
2299 '}\n '
2300 'virtual_ipaddress {\n '
2301 '1.2.3.100/24 dev if0\n '
2302 '}\n '
2303 'track_script {\n '
2304 'check_backend\n }\n'
2305 '}\n'
2306 'vrrp_instance VI_1 {\n '
2307 'state BACKUP\n '
2308 'priority 90\n '
2309 'interface if1\n '
2310 'virtual_router_id 51\n '
2311 'advert_int 1\n '
2312 'authentication {\n '
2313 'auth_type PASS\n '
2314 'auth_pass 12345\n '
2315 '}\n '
2316 'unicast_src_ip 100.100.100.1\n '
2317 'unicast_peer {\n '
2318 '100.100.100.2\n '
2319 '}\n '
2320 'virtual_ipaddress {\n '
2321 '100.100.100.100/24 dev if1\n '
2322 '}\n '
2323 'track_script {\n '
2324 'check_backend\n }\n'
2325 '}\n'
2326 }
2327 }
2328
2329 # check keepalived config
2330 assert keepalived_generated_conf[0] == keepalived_expected_conf
2331
2332 @patch("cephadm.serve.CephadmServe._run_cephadm")
2333 def test_keepalive_interface_host_filtering(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2334 # we need to make sure keepalived daemons will have an interface
2335 # on the hosts we deploy them on in order to set up their VIP.
2336 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2337
2338 with with_host(cephadm_module, 'test', addr='1.2.3.1'):
2339 with with_host(cephadm_module, 'test2', addr='1.2.3.2'):
2340 with with_host(cephadm_module, 'test3', addr='1.2.3.3'):
2341 with with_host(cephadm_module, 'test4', addr='1.2.3.3'):
2342 # set up "test" and "test4" to have all the necessary interfaces,
2343 # "test2" to have only one of them (it should still be filtered out)
2344 # and "test3" to have none of them
2345 cephadm_module.cache.update_host_networks('test', {
2346 '1.2.3.0/24': {
2347 'if0': ['1.2.3.1']
2348 },
2349 '100.100.100.0/24': {
2350 'if1': ['100.100.100.1']
2351 }
2352 })
2353 cephadm_module.cache.update_host_networks('test2', {
2354 '1.2.3.0/24': {
2355 'if0': ['1.2.3.2']
2356 },
2357 })
2358 cephadm_module.cache.update_host_networks('test4', {
2359 '1.2.3.0/24': {
2360 'if0': ['1.2.3.4']
2361 },
2362 '100.100.100.0/24': {
2363 'if1': ['100.100.100.4']
2364 }
2365 })
2366
2367 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
2368 rgw_frontend_type='beast')
2369
2370 ispec = IngressSpec(service_type='ingress',
2371 service_id='test',
2372 placement=PlacementSpec(hosts=['test', 'test2', 'test3', 'test4']),
2373 backend_service='rgw.foo',
2374 frontend_port=8089,
2375 monitor_port=8999,
2376 monitor_user='admin',
2377 monitor_password='12345',
2378 keepalived_password='12345',
2379 virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"])
2380 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
2381 # since we're never actually going to refresh the host here,
2382 # check the tmp daemons to see what was placed during the apply
2383 daemons = cephadm_module.cache._get_tmp_daemons()
2384 keepalive_daemons = [d for d in daemons if d.daemon_type == 'keepalived']
2385 hosts_deployed_on = [d.hostname for d in keepalive_daemons]
2386 assert 'test' in hosts_deployed_on
2387 assert 'test2' not in hosts_deployed_on
2388 assert 'test3' not in hosts_deployed_on
2389 assert 'test4' in hosts_deployed_on
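
    # Illustrative sketch, not part of the upstream suite: the filtering asserted above keeps
    # a keepalived candidate host only if it has an address in the subnet of every configured
    # VIP. A stand-alone check along those lines (the helper name is made up):
    @staticmethod
    def _host_covers_all_vip_networks(host_networks: Dict[str, Dict[str, List[str]]],
                                      virtual_ips: List[str]) -> bool:
        import ipaddress
        host_subnets = [ipaddress.ip_network(net) for net in host_networks]
        for vip in virtual_ips:
            vip_net = ipaddress.ip_network(vip, strict=False)  # '1.2.3.100/24' -> 1.2.3.0/24
            if vip_net not in host_subnets:
                return False
        return True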
2390
2391 @patch("cephadm.serve.CephadmServe._run_cephadm")
2392 def test_haproxy_port_ips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2393 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2394
2395 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
2396 cephadm_module.cache.update_host_networks('test', {
2397 '1.2.3.0/24': {
2398 'if0': ['1.2.3.4/32']
2399 }
2400 })
2401
2402 # the ingress backend
2403 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
2404 rgw_frontend_type='beast')
2405
2406 ip = '1.2.3.100'
2407 frontend_port = 8089
2408
2409 ispec = IngressSpec(service_type='ingress',
2410 service_id='test',
2411 backend_service='rgw.foo',
2412 frontend_port=frontend_port,
2413 monitor_port=8999,
2414 monitor_user='admin',
2415 monitor_password='12345',
2416 keepalived_password='12345',
2417 virtual_ip=f"{ip}/24")
2418 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
2419 # prepare the haproxy daemon spec and check its frontend-port to IP mapping
2420 haproxy_daemon_spec = cephadm_module.cephadm_services['ingress'].prepare_create(
2421 CephadmDaemonDeploySpec(
2422 host='test',
2423 daemon_type='haproxy',
2424 daemon_id='ingress',
2425 service_name=ispec.service_name()))
2426
2427 assert haproxy_daemon_spec.port_ips == {str(frontend_port): ip}
2428
2429 @patch("cephadm.serve.CephadmServe._run_cephadm")
2430 @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
2431 @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
2432 @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
2433 @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
2434 def test_keepalive_only_nfs_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2435 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2436
2437 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
2438 cephadm_module.cache.update_host_networks('test', {
2439 '1.2.3.0/24': {
2440 'if0': ['1.2.3.1']
2441 }
2442 })
2443
2444 # the NFS backend for a keepalive-only ingress
2445 s = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1),
2446 virtual_ip='1.2.3.0/24')
2447
2448 ispec = IngressSpec(service_type='ingress',
2449 service_id='test',
2450 backend_service='nfs.foo',
2451 monitor_port=8999,
2452 monitor_user='admin',
2453 monitor_password='12345',
2454 keepalived_password='12345',
2455 virtual_ip='1.2.3.0/24',
2456 keepalive_only=True)
2457 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
2458 nfs_generated_conf, _ = cephadm_module.cephadm_services['nfs'].generate_config(
2459 CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=s.service_name()))
2460 ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
2461 assert "Bind_addr = 1.2.3.0/24" in ganesha_conf
2462
2463 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
2464 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
2465
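# with keepalive_only=True there is no haproxy frontend to health-check, so the
# generated check_backend script is simply /usr/bin/false, and the NFS daemon itself
# binds to the virtual IP (the Bind_addr assertion above).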
2466 keepalived_expected_conf = {
2467 'files':
2468 {
2469 'keepalived.conf':
2470 '# This file is generated by cephadm.\n'
2471 'global_defs {\n '
2472 'enable_script_security\n '
2473 'script_user root\n'
2474 '}\n\n'
2475 'vrrp_script check_backend {\n '
2476 'script "/usr/bin/false"\n '
2477 'weight -20\n '
2478 'interval 2\n '
2479 'rise 2\n '
2480 'fall 2\n}\n\n'
2481 'vrrp_instance VI_0 {\n '
2482 'state MASTER\n '
2483 'priority 100\n '
2484 'interface if0\n '
2485 'virtual_router_id 50\n '
2486 'advert_int 1\n '
2487 'authentication {\n '
2488 'auth_type PASS\n '
2489 'auth_pass 12345\n '
2490 '}\n '
2491 'unicast_src_ip 1.2.3.1\n '
2492 'unicast_peer {\n '
2493 '}\n '
2494 'virtual_ipaddress {\n '
2495 '1.2.3.0/24 dev if0\n '
2496 '}\n '
2497 'track_script {\n '
2498 'check_backend\n }\n'
2499 '}\n'
2500 }
2501 }
2502
2503 # check keepalived config
2504 assert keepalived_generated_conf[0] == keepalived_expected_conf
2505
2506 @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
2507 @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
2508 @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
2509 @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
2510 @patch("cephadm.inventory.Inventory.keys")
2511 @patch("cephadm.inventory.Inventory.get_addr")
2512 @patch("cephadm.utils.resolve_ip")
2513 @patch("cephadm.inventory.HostCache.get_daemons_by_service")
2514 @patch("cephadm.serve.CephadmServe._run_cephadm")
2515 def test_ingress_config_nfs_proxy_protocol(
2516 self,
2517 _run_cephadm,
2518 _get_daemons_by_service,
2519 _resolve_ip,
2520 _get_addr,
2521 _inventory_keys,
2522 cephadm_module: CephadmOrchestrator,
2523 ):
2524 """Verify that setting enable_haproxy_protocol for both ingress and
2525 nfs services sets the desired configuration parameters in both
2526 the haproxy config and nfs ganesha config.
2527 """
2528 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2529
2530 def fake_resolve_ip(hostname: str) -> str:
2531 if hostname in ('host1', "192.168.122.111"):
2532 return '192.168.122.111'
2533 elif hostname in ('host2', '192.168.122.222'):
2534 return '192.168.122.222'
2535 else:
2536 raise KeyError(hostname)
2537 _resolve_ip.side_effect = fake_resolve_ip
2538 _get_addr.side_effect = fake_resolve_ip
2539
2540 def fake_keys():
2541 return ['host1', 'host2']
2542 _inventory_keys.side_effect = fake_keys
2543
2544 nfs_service = NFSServiceSpec(
2545 service_id="foo",
2546 placement=PlacementSpec(
2547 count=1,
2548 hosts=['host1', 'host2']),
2549 port=12049,
2550 enable_haproxy_protocol=True,
2551 enable_nlm=True,
2552 )
2553
2554 ispec = IngressSpec(
2555 service_type='ingress',
2556 service_id='nfs.foo',
2557 backend_service='nfs.foo',
2558 frontend_port=2049,
2559 monitor_port=9049,
2560 virtual_ip='192.168.122.100/24',
2561 monitor_user='admin',
2562 monitor_password='12345',
2563 keepalived_password='12345',
2564 enable_haproxy_protocol=True,
2565 )
2566
2567 cephadm_module.spec_store._specs = {
2568 'nfs.foo': nfs_service,
2569 'ingress.nfs.foo': ispec
2570 }
2571 cephadm_module.spec_store.spec_created = {
2572 'nfs.foo': datetime_now(),
2573 'ingress.nfs.foo': datetime_now()
2574 }
2575
2576 haproxy_txt = (
2577 '# This file is generated by cephadm.\n'
2578 'global\n'
2579 ' log 127.0.0.1 local2\n'
2580 ' chroot /var/lib/haproxy\n'
2581 ' pidfile /var/lib/haproxy/haproxy.pid\n'
2582 ' maxconn 8000\n'
2583 ' daemon\n'
2584 ' stats socket /var/lib/haproxy/stats\n\n'
2585 'defaults\n'
2586 ' mode tcp\n'
2587 ' log global\n'
2588 ' timeout queue 1m\n'
2589 ' timeout connect 10s\n'
2590 ' timeout client 1m\n'
2591 ' timeout server 1m\n'
2592 ' timeout check 10s\n'
2593 ' maxconn 8000\n\n'
2594 'frontend stats\n'
2595 ' mode http\n'
2596 ' bind 192.168.122.100:9049\n'
2597 ' bind 192.168.122.111:9049\n'
2598 ' stats enable\n'
2599 ' stats uri /stats\n'
2600 ' stats refresh 10s\n'
2601 ' stats auth admin:12345\n'
2602 ' http-request use-service prometheus-exporter if { path /metrics }\n'
2603 ' monitor-uri /health\n\n'
2604 'frontend frontend\n'
2605 ' bind 192.168.122.100:2049\n'
2606 ' default_backend backend\n\n'
2607 'backend backend\n'
2608 ' mode tcp\n'
2609 ' balance source\n'
2610 ' hash-type consistent\n'
2611 ' default-server send-proxy-v2\n'
2612 ' server nfs.foo.0 192.168.122.111:12049 check\n'
2613 )
2614 haproxy_expected_conf = {
2615 'files': {'haproxy.cfg': haproxy_txt}
2616 }
2617
2618 nfs_ganesha_txt = (
2619 "# This file is generated by cephadm.\n"
2620 'NFS_CORE_PARAM {\n'
2621 ' Enable_NLM = true;\n'
2622 ' Enable_RQUOTA = false;\n'
2623 ' Protocols = 4;\n'
2624 ' NFS_Port = 2049;\n'
2625 ' HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;\n'
2626 '}\n'
2627 '\n'
2628 'NFSv4 {\n'
2629 ' Delegations = false;\n'
2630 " RecoveryBackend = 'rados_cluster';\n"
2631 ' Minor_Versions = 1, 2;\n'
2632 ' IdmapConf = "/etc/ganesha/idmap.conf";\n'
2633 '}\n'
2634 '\n'
2635 'RADOS_KV {\n'
2636 ' UserId = "nfs.foo.test.0.0";\n'
2637 ' nodeid = "nfs.foo.None";\n'
2638 ' pool = ".nfs";\n'
2639 ' namespace = "foo";\n'
2640 '}\n'
2641 '\n'
2642 'RADOS_URLS {\n'
2643 ' UserId = "nfs.foo.test.0.0";\n'
2644 ' watch_url = '
2645 '"rados://.nfs/foo/conf-nfs.foo";\n'
2646 '}\n'
2647 '\n'
2648 'RGW {\n'
2649 ' cluster = "ceph";\n'
2650 ' name = "client.nfs.foo.test.0.0-rgw";\n'
2651 '}\n'
2652 '\n'
2653 "%url rados://.nfs/foo/conf-nfs.foo"
2654 )
2655 nfs_expected_conf = {
2656 'files': {'ganesha.conf': nfs_ganesha_txt, 'idmap.conf': ''},
2657 'config': '',
2658 'extra_args': ['-N', 'NIV_EVENT'],
2659 'keyring': (
2660 '[client.nfs.foo.test.0.0]\n'
2661 'key = None\n'
2662 ),
2663 'namespace': 'foo',
2664 'pool': '.nfs',
2665 'rgw': {
2666 'cluster': 'ceph',
2667 'keyring': (
2668 '[client.nfs.foo.test.0.0-rgw]\n'
2669 'key = None\n'
2670 ),
2671 'user': 'nfs.foo.test.0.0-rgw',
2672 },
2673 'userid': 'nfs.foo.test.0.0',
2674 }
2675
2676 nfs_daemons = [
2677 DaemonDescription(
2678 daemon_type='nfs',
2679 daemon_id='foo.0.1.host1.qwerty',
2680 hostname='host1',
2681 rank=0,
2682 rank_generation=1,
2683 ports=[12049],
2684 ),
2685 DaemonDescription(
2686 daemon_type='nfs',
2687 daemon_id='foo.0.0.host2.abcdef',
2688 hostname='host2',
2689 rank=0,
2690 rank_generation=0,
2691 ports=[12049],
2692 ),
2693 ]
2694 _get_daemons_by_service.return_value = nfs_daemons
2695
2696 ingress_svc = cephadm_module.cephadm_services['ingress']
2697 nfs_svc = cephadm_module.cephadm_services['nfs']
2698
2699 # add host network info to one host to test the behavior of
2700 # adding all known-good addresses of the host to the list.
2701 cephadm_module.cache.update_host_networks('host1', {
2702 # this one is additional
2703 '10.10.2.0/24': {
2704 'eth1': ['10.10.2.20']
2705 },
2706 # this is redundant and will be skipped
2707 '192.168.122.0/24': {
2708 'eth0': ['192.168.122.111']
2709 },
2710 # this is a link-local address and will be ignored
2711 "fe80::/64": {
2712 "veth0": [
2713 "fe80::8cf5:25ff:fe1c:d963"
2714 ],
2715 "eth0": [
2716 "fe80::c7b:cbff:fef6:7370"
2717 ],
2718 "eth1": [
2719 "fe80::7201:25a7:390b:d9a7"
2720 ]
2721 },
2722 })
2723
2724 haproxy_generated_conf, _ = ingress_svc.haproxy_generate_config(
2725 CephadmDaemonDeploySpec(
2726 host='host1',
2727 daemon_id='ingress',
2728 service_name=ispec.service_name(),
2729 ),
2730 )
2731 assert haproxy_generated_conf == haproxy_expected_conf
2732
2733 nfs_generated_conf, _ = nfs_svc.generate_config(
2734 CephadmDaemonDeploySpec(
2735 host='test',
2736 daemon_id='foo.test.0.0',
2737 service_name=nfs_service.service_name(),
2738 ),
2739 )
2740 assert nfs_generated_conf == nfs_expected_conf
2741
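# Illustrative sketch, not part of the upstream suite: the HAProxy_Hosts value asserted in
# test_ingress_config_nfs_proxy_protocol collects the resolved address of each ingress host
# plus any additional addresses found in that host's networks, skipping duplicates and
# link-local (fe80::/64) addresses, exactly the cases called out in the comments above.
# A minimal stand-alone version of that per-host filtering (the helper name is made up):
def _haproxy_hosts_for_host_sketch(resolved_ip: str,
                                   host_networks: Dict[str, Dict[str, List[str]]]) -> List[str]:
    import ipaddress
    addresses = [resolved_ip]
    for subnet, interfaces in host_networks.items():
        if ipaddress.ip_network(subnet).is_link_local:
            continue  # fe80::/64 entries are ignored
        for addrs in interfaces.values():
            for addr in addrs:
                if addr not in addresses:  # redundant entries (e.g. 192.168.122.111) are skipped
                    addresses.append(addr)
    return addresses
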
2742
2743 class TestCephFsMirror:
2744 @patch("cephadm.serve.CephadmServe._run_cephadm")
2745 def test_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2746 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2747 with with_host(cephadm_module, 'test'):
2748 with with_service(cephadm_module, ServiceSpec('cephfs-mirror')):
2749 cephadm_module.assert_issued_mon_command({
2750 'prefix': 'mgr module enable',
2751 'module': 'mirroring'
2752 })
2753
2754
2755 class TestJaeger:
2756 @patch("cephadm.serve.CephadmServe._run_cephadm")
2757 def test_jaeger_query(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2758 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2759
2760 spec = TracingSpec(es_nodes="192.168.0.1:9200",
2761 service_type="jaeger-query")
2762
2763 config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}
2764
2765 with with_host(cephadm_module, 'test'):
2766 with with_service(cephadm_module, spec):
2767 _run_cephadm.assert_called_with(
2768 'test',
2769 "jaeger-query.test",
2770 ['_orch', 'deploy'],
2771 [],
2772 stdin=json.dumps({
2773 "fsid": "fsid",
2774 "name": 'jaeger-query.test',
2775 "image": '',
2776 "deploy_arguments": [],
2777 "params": {
2778 'tcp_ports': [16686],
2779 },
2780 "meta": {
2781 'service_name': 'jaeger-query',
2782 'ports': [16686],
2783 'ip': None,
2784 'deployed_by': [],
2785 'rank': None,
2786 'rank_generation': None,
2787 'extra_container_args': None,
2788 'extra_entrypoint_args': None,
2789 },
2790 "config_blobs": config,
2791 }),
2792 use_current_daemon_image=False,
2793 )
2794
2795 @patch("cephadm.serve.CephadmServe._run_cephadm")
2796 def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2797 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2798
2799 collector_spec = TracingSpec(service_type="jaeger-collector")
2800 es_spec = TracingSpec(service_type="elasticsearch")
2801 es_config = {}
2802
2803 with with_host(cephadm_module, 'test'):
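# build_url() is called without a scheme here, so it yields '//<addr>:9200';
# lstrip('/') trims that to '<addr>:9200' before the explicit 'http://' prefix
# is added by the f-string below.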
2804 collector_config = {
2805 "elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'}
2806 with with_service(cephadm_module, es_spec):
2807 _run_cephadm.assert_called_with(
2808 "test",
2809 "elasticsearch.test",
2810 ['_orch', 'deploy'],
2811 [],
2812 stdin=json.dumps({
2813 "fsid": "fsid",
2814 "name": 'elasticsearch.test',
2815 "image": '',
2816 "deploy_arguments": [],
2817 "params": {
2818 'tcp_ports': [9200],
2819 },
2820 "meta": {
2821 'service_name': 'elasticsearch',
2822 'ports': [9200],
2823 'ip': None,
2824 'deployed_by': [],
2825 'rank': None,
2826 'rank_generation': None,
2827 'extra_container_args': None,
2828 'extra_entrypoint_args': None,
2829 },
2830 "config_blobs": es_config,
2831 }),
2832 use_current_daemon_image=False,
2833 )
2834 with with_service(cephadm_module, collector_spec):
2835 _run_cephadm.assert_called_with(
2836 "test",
2837 "jaeger-collector.test",
2838 ['_orch', 'deploy'],
2839 [],
2840 stdin=json.dumps({
2841 "fsid": "fsid",
2842 "name": 'jaeger-collector.test',
2843 "image": '',
2844 "deploy_arguments": [],
2845 "params": {
2846 'tcp_ports': [14250],
2847 },
2848 "meta": {
2849 'service_name': 'jaeger-collector',
2850 'ports': [14250],
2851 'ip': None,
2852 'deployed_by': [],
2853 'rank': None,
2854 'rank_generation': None,
2855 'extra_container_args': None,
2856 'extra_entrypoint_args': None,
2857 },
2858 "config_blobs": collector_config,
2859 }),
2860 use_current_daemon_image=False,
2861 )
2862
2863 @patch("cephadm.serve.CephadmServe._run_cephadm")
2864 def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
2865 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2866
2867 collector_spec = TracingSpec(service_type="jaeger-collector", es_nodes="192.168.0.1:9200")
2868 collector_config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}
2869
2870 agent_spec = TracingSpec(service_type="jaeger-agent")
2871 agent_config = {"collector_nodes": "test:14250"}
2872
2873 with with_host(cephadm_module, 'test'):
2874 with with_service(cephadm_module, collector_spec):
2875 _run_cephadm.assert_called_with(
2876 "test",
2877 "jaeger-collector.test",
2878 ['_orch', 'deploy'],
2879 [],
2880 stdin=json.dumps({
2881 "fsid": "fsid",
2882 "name": 'jaeger-collector.test',
2883 "image": '',
2884 "deploy_arguments": [],
2885 "params": {
2886 'tcp_ports': [14250],
2887 },
2888 "meta": {
2889 'service_name': 'jaeger-collector',
2890 'ports': [14250],
2891 'ip': None,
2892 'deployed_by': [],
2893 'rank': None,
2894 'rank_generation': None,
2895 'extra_container_args': None,
2896 'extra_entrypoint_args': None,
2897 },
2898 "config_blobs": collector_config,
2899 }),
2900 use_current_daemon_image=False,
2901 )
2902 with with_service(cephadm_module, agent_spec):
2903 _run_cephadm.assert_called_with(
2904 "test",
2905 "jaeger-agent.test",
2906 ['_orch', 'deploy'],
2907 [],
2908 stdin=json.dumps({
2909 "fsid": "fsid",
2910 "name": 'jaeger-agent.test',
2911 "image": '',
2912 "deploy_arguments": [],
2913 "params": {
2914 'tcp_ports': [6799],
2915 },
2916 "meta": {
2917 'service_name': 'jaeger-agent',
2918 'ports': [6799],
2919 'ip': None,
2920 'deployed_by': [],
2921 'rank': None,
2922 'rank_generation': None,
2923 'extra_container_args': None,
2924 'extra_entrypoint_args': None,
2925 },
2926 "config_blobs": agent_config,
2927 }),
2928 use_current_daemon_image=False,
2929 )
2930
2931
2932 class TestCustomContainer:
2933 @patch("cephadm.serve.CephadmServe._run_cephadm")
2934 def test_deploy_custom_container(
2935 self, _run_cephadm, cephadm_module: CephadmOrchestrator
2936 ):
2937 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2938
2939 spec = CustomContainerSpec(
2940 service_id='tsettinu',
2941 image='quay.io/foobar/barbaz:latest',
2942 entrypoint='/usr/local/bin/blat.sh',
2943 ports=[9090],
2944 )
2945
2946 with with_host(cephadm_module, 'test'):
2947 with with_service(cephadm_module, spec):
2948 _run_cephadm.assert_called_with(
2949 'test',
2950 "container.tsettinu.test",
2951 ['_orch', 'deploy'],
2952 [],
2953 stdin=json.dumps(
2954 {
2955 "fsid": "fsid",
2956 "name": 'container.tsettinu.test',
2957 "image": 'quay.io/foobar/barbaz:latest',
2958 "deploy_arguments": [],
2959 "params": {
2960 'tcp_ports': [9090],
2961 },
2962 "meta": {
2963 'service_name': 'container.tsettinu',
2964 'ports': [],
2965 'ip': None,
2966 'deployed_by': [],
2967 'rank': None,
2968 'rank_generation': None,
2969 'extra_container_args': None,
2970 'extra_entrypoint_args': None,
2971 },
2972 "config_blobs": {
2973 "image": "quay.io/foobar/barbaz:latest",
2974 "entrypoint": "/usr/local/bin/blat.sh",
2975 "args": [],
2976 "envs": [],
2977 "volume_mounts": {},
2978 "privileged": False,
2979 "ports": [9090],
2980 "dirs": [],
2981 "files": {},
2982 },
2983 }
2984 ),
2985 use_current_daemon_image=False,
2986 )
2987
2988 @patch("cephadm.serve.CephadmServe._run_cephadm")
2989 def test_deploy_custom_container_with_init_ctrs(
2990 self, _run_cephadm, cephadm_module: CephadmOrchestrator
2991 ):
2992 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
2993
2994 spec = CustomContainerSpec(
2995 service_id='tsettinu',
2996 image='quay.io/foobar/barbaz:latest',
2997 entrypoint='/usr/local/bin/blat.sh',
2998 ports=[9090],
2999 init_containers=[
3000 {'entrypoint': '/usr/local/bin/prepare.sh'},
3001 {
3002 'entrypoint': '/usr/local/bin/optimize.sh',
3003 'entrypoint_args': [
3004 '--timeout=5m',
3005 '--cores=8',
3006 {'argument': '--title=Alpha One'},
3007 ],
3008 },
3009 ],
3010 )
3011
3012 expected = {
3013 'fsid': 'fsid',
3014 'name': 'container.tsettinu.test',
3015 'image': 'quay.io/foobar/barbaz:latest',
3016 'deploy_arguments': [],
3017 'params': {
3018 'tcp_ports': [9090],
3019 'init_containers': [
3020 {'entrypoint': '/usr/local/bin/prepare.sh'},
3021 {
3022 'entrypoint': '/usr/local/bin/optimize.sh',
3023 'entrypoint_args': [
3024 '--timeout=5m',
3025 '--cores=8',
3026 '--title=Alpha One',
3027 ],
3028 },
3029 ],
3030 },
3031 'meta': {
3032 'service_name': 'container.tsettinu',
3033 'ports': [],
3034 'ip': None,
3035 'deployed_by': [],
3036 'rank': None,
3037 'rank_generation': None,
3038 'extra_container_args': None,
3039 'extra_entrypoint_args': None,
3040 'init_containers': [
3041 {'entrypoint': '/usr/local/bin/prepare.sh'},
3042 {
3043 'entrypoint': '/usr/local/bin/optimize.sh',
3044 'entrypoint_args': [
3045 '--timeout=5m',
3046 '--cores=8',
3047 {'argument': '--title=Alpha One', 'split': False},
3048 ],
3049 },
3050 ],
3051 },
3052 'config_blobs': {
3053 'image': 'quay.io/foobar/barbaz:latest',
3054 'entrypoint': '/usr/local/bin/blat.sh',
3055 'args': [],
3056 'envs': [],
3057 'volume_mounts': {},
3058 'privileged': False,
3059 'ports': [9090],
3060 'dirs': [],
3061 'files': {},
3062 },
3063 }
3064 with with_host(cephadm_module, 'test'):
3065 with with_service(cephadm_module, spec):
3066 _run_cephadm.assert_called_with(
3067 'test',
3068 'container.tsettinu.test',
3069 ['_orch', 'deploy'],
3070 [],
3071 stdin=json.dumps(expected),
3072 use_current_daemon_image=False,
3073 )
3074
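# Illustrative sketch, not part of the upstream suite: the expected payload above shows how
# init-container entrypoint_args are normalised. Plain strings pass through unchanged, while
# dict entries such as {'argument': '--title=Alpha One'} are flattened to the bare argument
# string in "params" but kept as {'argument': ..., 'split': False} in "meta". The "params"
# side of that flattening is essentially (the helper name is made up):
def _flatten_entrypoint_args_sketch(args: List) -> List[str]:
    flat = []
    for entry in args:
        flat.append(entry['argument'] if isinstance(entry, dict) else entry)
    return flat
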
3075
3076 class TestSMB:
3077 @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
3078 @patch("cephadm.serve.CephadmServe._run_cephadm")
3079 def test_deploy_smb(
3080 self, _run_cephadm, _get_uname, cephadm_module: CephadmOrchestrator
3081 ):
3082 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
3083 _get_uname.return_value = 'tango.briskly'
3084
3085 spec = SMBSpec(
3086 cluster_id='foxtrot',
3087 config_uri='rados://.smb/foxtrot/config.json',
3088 )
3089
3090 expected = {
3091 'fsid': 'fsid',
3092 'name': 'smb.tango.briskly',
3093 'image': '',
3094 'deploy_arguments': [],
3095 'params': {},
3096 'meta': {
3097 'service_name': 'smb',
3098 'ports': [],
3099 'ip': None,
3100 'deployed_by': [],
3101 'rank': None,
3102 'rank_generation': None,
3103 'extra_container_args': None,
3104 'extra_entrypoint_args': None,
3105 },
3106 'config_blobs': {
3107 'cluster_id': 'foxtrot',
3108 'features': [],
3109 'config_uri': 'rados://.smb/foxtrot/config.json',
3110 'config': '',
3111 'keyring': '[client.smb.config.tango.briskly]\nkey = None\n',
3112 'config_auth_entity': 'client.smb.config.tango.briskly',
3113 },
3114 }
3115 with with_host(cephadm_module, 'hostx'):
3116 with with_service(cephadm_module, spec):
3117 _run_cephadm.assert_called_with(
3118 'hostx',
3119 'smb.tango.briskly',
3120 ['_orch', 'deploy'],
3121 [],
3122 stdin=json.dumps(expected),
3123 use_current_daemon_image=False
3124 )
3125
3126 @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
3127 @patch("cephadm.serve.CephadmServe._run_cephadm")
3128 def test_deploy_smb_join_dns(
3129 self, _run_cephadm, _get_uname, cephadm_module: CephadmOrchestrator
3130 ):
3131 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
3132 _get_uname.return_value = 'tango.briskly'
3133
3134 spec = SMBSpec(
3135 cluster_id='foxtrot',
3136 features=['domain'],
3137 config_uri='rados://.smb/foxtrot/config2.json',
3138 join_sources=[
3139 'rados://.smb/foxtrot/join1.json',
3140 'rados:mon-config-key:smb/config/foxtrot/join2.json',
3141 ],
3142 custom_dns=['10.8.88.103'],
3143 include_ceph_users=[
3144 'client.smb.fs.cephfs.share1',
3145 'client.smb.fs.cephfs.share2',
3146 'client.smb.fs.fs2.share3',
3147 ],
3148 )
3149
3150 expected = {
3151 'fsid': 'fsid',
3152 'name': 'smb.tango.briskly',
3153 'image': '',
3154 'deploy_arguments': [],
3155 'params': {},
3156 'meta': {
3157 'service_name': 'smb',
3158 'ports': [],
3159 'ip': None,
3160 'deployed_by': [],
3161 'rank': None,
3162 'rank_generation': None,
3163 'extra_container_args': None,
3164 'extra_entrypoint_args': None,
3165 },
3166 'config_blobs': {
3167 'cluster_id': 'foxtrot',
3168 'features': ['domain'],
3169 'config_uri': 'rados://.smb/foxtrot/config2.json',
3170 'join_sources': [
3171 'rados://.smb/foxtrot/join1.json',
3172 'rados:mon-config-key:smb/config/foxtrot/join2.json',
3173 ],
3174 'custom_dns': ['10.8.88.103'],
3175 'config': '',
3176 'keyring': (
3177 '[client.smb.config.tango.briskly]\nkey = None\n\n'
3178 '[client.smb.fs.cephfs.share1]\nkey = None\n\n'
3179 '[client.smb.fs.cephfs.share2]\nkey = None\n\n'
3180 '[client.smb.fs.fs2.share3]\nkey = None\n'
3181 ),
3182 'config_auth_entity': 'client.smb.config.tango.briskly',
3183 },
3184 }
3185 with with_host(cephadm_module, 'hostx'):
3186 with with_service(cephadm_module, spec):
3187 _run_cephadm.assert_called_with(
3188 'hostx',
3189 'smb.tango.briskly',
3190 ['_orch', 'deploy'],
3191 [],
3192 stdin=json.dumps(expected),
3193 use_current_daemon_image=False
3194 )
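

# Illustrative sketch, not part of the upstream suite: the smb keyring blobs asserted above
# consist of one '[<entity>]\nkey = <key>\n' stanza per auth entity (the config entity first,
# then every include_ceph_users entry), joined by blank lines. The test fixtures resolve each
# key to None, which is why the expected strings read 'key = None'. A stand-alone rendering:
def _smb_keyring_sketch(entities: List[str], key: str = 'None') -> str:
    return '\n'.join(f'[{entity}]\nkey = {key}\n' for entity in entities)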