]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/cephadm/tests/test_services.py
add stop-gap to fix compat with CPUs not supporting SSE 4.1
[ceph.git] / ceph / src / pybind / mgr / cephadm / tests / test_services.py
1 from textwrap import dedent
2 import json
3 import urllib.parse
4 import yaml
5 from mgr_util import build_url
6
7 import pytest
8
9 from unittest.mock import MagicMock, call, patch, ANY
10
11 from cephadm.serve import CephadmServe
12 from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RgwService, \
13 RbdMirrorService, CrashService, CephadmDaemonDeploySpec
14 from cephadm.services.iscsi import IscsiService
15 from cephadm.services.nfs import NFSService
16 from cephadm.services.osd import OSDService
17 from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
18 NodeExporterService, LokiService, PromtailService
19 from cephadm.module import CephadmOrchestrator
20 from ceph.deployment.service_spec import IscsiServiceSpec, MonitoringSpec, AlertManagerSpec, \
21 ServiceSpec, RGWSpec, GrafanaSpec, SNMPGatewaySpec, IngressSpec, PlacementSpec, TracingSpec, \
22 PrometheusSpec, CephExporterSpec, NFSServiceSpec
23 from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect
24
25 from ceph.utils import datetime_now
26
27 from orchestrator import OrchestratorError
28 from orchestrator._interface import DaemonDescription
29
30 from typing import Dict, List
31
32 grafana_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQDIZSujNBlKaLJzmvntjukjMA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzEzMTE0NzA3WhcN\nMzIwNzEwMTE0NzA3WjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyMe4DMA+MeYK7BHZMHB\nq7zjliEOcNgxomjU8qbf5USF7Mqrf6+/87XWqj4pCyAW8x0WXEr6A56a+cmBVmt+\nqtWDzl020aoId6lL5EgLLn6/kMDCCJLq++Lg9cEofMSvcZh+lY2f+1p+C+00xent\nrLXvXGOilAZWaQfojT2BpRnNWWIFbpFwlcKrlg2G0cFjV5c1m6a0wpsQ9JHOieq0\nSvwCixajwq3CwAYuuiU1wjI4oJO4Io1+g8yB3nH2Mo/25SApCxMXuXh4kHLQr/T4\n4hqisvG4uJYgKMcSIrWj5o25mclByGi1UI/kZkCUES94i7Z/3ihx4Bad0AMs/9tw\nFwIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQAf+pwz7Gd7mDwU2LY0TQXsK6/8KGzh\nHuX+ErOb8h5cOAbvCnHjyJFWf6gCITG98k9nxU9NToG0WYuNm/max1y/54f0dtxZ\npUo6KSNl3w6iYCfGOeUIj8isi06xMmeTgMNzv8DYhDt+P2igN6LenqWTVztogkiV\nxQ5ZJFFLEw4sN0CXnrZX3t5ruakxLXLTLKeE0I91YJvjClSBGkVJq26wOKQNHMhx\npWxeydQ5EgPZY+Aviz5Dnxe8aB7oSSovpXByzxURSabOuCK21awW5WJCGNpmqhWK\nZzACBDEstccj57c4OGV0eayHJRsluVr2e9NHRINZA3qdB37e6gsI1xHo\n-----END CERTIFICATE-----\n"""
33
34 grafana_key = """-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLIx7gMwD4x5gr\nsEdkwcGrvOOWIQ5w2DGiaNTypt/lRIXsyqt/r7/ztdaqPikLIBbzHRZcSvoDnpr5\nyYFWa36q1YPOXTbRqgh3qUvkSAsufr+QwMIIkur74uD1wSh8xK9xmH6VjZ/7Wn4L\n7TTF6e2ste9cY6KUBlZpB+iNPYGlGc1ZYgVukXCVwquWDYbRwWNXlzWbprTCmxD0\nkc6J6rRK/AKLFqPCrcLABi66JTXCMjigk7gijX6DzIHecfYyj/blICkLExe5eHiQ\nctCv9PjiGqKy8bi4liAoxxIitaPmjbmZyUHIaLVQj+RmQJQRL3iLtn/eKHHgFp3Q\nAyz/23AXAgMBAAECggEAVoTB3Mm8azlPlaQB9GcV3tiXslSn+uYJ1duCf0sV52dV\nBzKW8s5fGiTjpiTNhGCJhchowqxoaew+o47wmGc2TvqbpeRLuecKrjScD0GkCYyQ\neM2wlshEbz4FhIZdgS6gbuh9WaM1dW/oaZoBNR5aTYo7xYTmNNeyLA/jO2zr7+4W\n5yES1lMSBXpKk7bDGKYY4bsX2b5RLr2Grh2u2bp7hoLABCEvuu8tSQdWXLEXWpXo\njwmV3hc6tabypIa0mj2Dmn2Dmt1ppSO0AZWG/WAizN3f4Z0r/u9HnbVrVmh0IEDw\n3uf2LP5o3msG9qKCbzv3lMgt9mMr70HOKnJ8ohMSKQKBgQDLkNb+0nr152HU9AeJ\nvdz8BeMxcwxCG77iwZphZ1HprmYKvvXgedqWtS6FRU+nV6UuQoPUbQxJBQzrN1Qv\nwKSlOAPCrTJgNgF/RbfxZTrIgCPuK2KM8I89VZv92TSGi362oQA4MazXC8RAWjoJ\nSu1/PHzK3aXOfVNSLrOWvIYeZQKBgQD/dgT6RUXKg0UhmXj7ExevV+c7oOJTDlMl\nvLngrmbjRgPO9VxLnZQGdyaBJeRngU/UXfNgajT/MU8B5fSKInnTMawv/tW7634B\nw3v6n5kNIMIjJmENRsXBVMllDTkT9S7ApV+VoGnXRccbTiDapBThSGd0wri/CuwK\nNWK1YFOeywKBgEDyI/XG114PBUJ43NLQVWm+wx5qszWAPqV/2S5MVXD1qC6zgCSv\nG9NLWN1CIMimCNg6dm7Wn73IM7fzvhNCJgVkWqbItTLG6DFf3/DPODLx1wTMqLOI\nqFqMLqmNm9l1Nec0dKp5BsjRQzq4zp1aX21hsfrTPmwjxeqJZdioqy2VAoGAXR5X\nCCdSHlSlUW8RE2xNOOQw7KJjfWT+WAYoN0c7R+MQplL31rRU7dpm1bLLRBN11vJ8\nMYvlT5RYuVdqQSP6BkrX+hLJNBvOLbRlL+EXOBrVyVxHCkDe+u7+DnC4epbn+N8P\nLYpwqkDMKB7diPVAizIKTBxinXjMu5fkKDs5n+sCgYBbZheYKk5M0sIxiDfZuXGB\nkf4mJdEkTI1KUGRdCwO/O7hXbroGoUVJTwqBLi1tKqLLarwCITje2T200BYOzj82\nqwRkCXGtXPKnxYEEUOiFx9OeDrzsZV00cxsEnX0Zdj+PucQ/J3Cvd0dWUspJfLHJ\n39gnaegswnz9KMQAvzKFdg==\n-----END PRIVATE KEY-----\n"""
35
36
37 class FakeInventory:
38 def get_addr(self, name: str) -> str:
39 return '1.2.3.4'
40
41
42 class FakeMgr:
43 def __init__(self):
44 self.config = ''
45 self.set_mon_crush_locations: Dict[str, List[str]] = {}
46 self.check_mon_command = MagicMock(side_effect=self._check_mon_command)
47 self.mon_command = MagicMock(side_effect=self._check_mon_command)
48 self.template = MagicMock()
49 self.log = MagicMock()
50 self.inventory = FakeInventory()
51
52 def _check_mon_command(self, cmd_dict, inbuf=None):
53 prefix = cmd_dict.get('prefix')
54 if prefix == 'get-cmd':
55 return 0, self.config, ''
56 if prefix == 'set-cmd':
57 self.config = cmd_dict.get('value')
58 return 0, 'value set', ''
59 if prefix in ['auth get']:
60 return 0, '[foo]\nkeyring = asdf\n', ''
61 if prefix == 'quorum_status':
62 # actual quorum status output from testing
63 # note in this output all of the mons have blank crush locations
64 return 0, """{"election_epoch": 14, "quorum": [0, 1, 2], "quorum_names": ["vm-00", "vm-01", "vm-02"], "quorum_leader_name": "vm-00", "quorum_age": 101, "features": {"quorum_con": "4540138322906710015", "quorum_mon": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"]}, "monmap": {"epoch": 3, "fsid": "9863e1b8-6f24-11ed-8ad8-525400c13ad2", "modified": "2022-11-28T14:00:29.972488Z", "created": "2022-11-28T13:57:55.847497Z", "min_mon_release": 18, "min_mon_release_name": "reef", "election_strategy": 1, "disallowed_leaders: ": "", "stretch_mode": false, "tiebreaker_mon": "", "features": {"persistent": ["kraken", "luminous", "mimic", "osdmap-prune", "nautilus", "octopus", "pacific", "elector-pinging", "quincy", "reef"], "optional": []}, "mons": [{"rank": 0, "name": "vm-00", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.61:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.61:6789", "nonce": 0}]}, "addr": "192.168.122.61:6789/0", "public_addr": "192.168.122.61:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 1, "name": "vm-01", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.63:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.63:6789", "nonce": 0}]}, "addr": "192.168.122.63:6789/0", "public_addr": "192.168.122.63:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}, {"rank": 2, "name": "vm-02", "public_addrs": {"addrvec": [{"type": "v2", "addr": "192.168.122.82:3300", "nonce": 0}, {"type": "v1", "addr": "192.168.122.82:6789", "nonce": 0}]}, "addr": "192.168.122.82:6789/0", "public_addr": "192.168.122.82:6789/0", "priority": 0, "weight": 0, "crush_location": "{}"}]}}""", ''
65 if prefix == 'mon set_location':
66 self.set_mon_crush_locations[cmd_dict.get('name')] = cmd_dict.get('args')
67 return 0, '', ''
68 return -1, '', 'error'
69
70 def get_minimal_ceph_conf(self) -> str:
71 return ''
72
73 def get_mgr_ip(self) -> str:
74 return '1.2.3.4'
75
76
77 class TestCephadmService:
78 def test_set_service_url_on_dashboard(self):
79 # pylint: disable=protected-access
80 mgr = FakeMgr()
81 service_url = 'http://svc:1000'
82 service = GrafanaService(mgr)
83 service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
84 assert mgr.config == service_url
85
86 # set-cmd should not be called if value doesn't change
87 mgr.check_mon_command.reset_mock()
88 service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
89 mgr.check_mon_command.assert_called_once_with({'prefix': 'get-cmd'})
90
91 def _get_services(self, mgr):
92 # services:
93 osd_service = OSDService(mgr)
94 nfs_service = NFSService(mgr)
95 mon_service = MonService(mgr)
96 mgr_service = MgrService(mgr)
97 mds_service = MdsService(mgr)
98 rgw_service = RgwService(mgr)
99 rbd_mirror_service = RbdMirrorService(mgr)
100 grafana_service = GrafanaService(mgr)
101 alertmanager_service = AlertmanagerService(mgr)
102 prometheus_service = PrometheusService(mgr)
103 node_exporter_service = NodeExporterService(mgr)
104 loki_service = LokiService(mgr)
105 promtail_service = PromtailService(mgr)
106 crash_service = CrashService(mgr)
107 iscsi_service = IscsiService(mgr)
108 cephadm_services = {
109 'mon': mon_service,
110 'mgr': mgr_service,
111 'osd': osd_service,
112 'mds': mds_service,
113 'rgw': rgw_service,
114 'rbd-mirror': rbd_mirror_service,
115 'nfs': nfs_service,
116 'grafana': grafana_service,
117 'alertmanager': alertmanager_service,
118 'prometheus': prometheus_service,
119 'node-exporter': node_exporter_service,
120 'loki': loki_service,
121 'promtail': promtail_service,
122 'crash': crash_service,
123 'iscsi': iscsi_service,
124 }
125 return cephadm_services
126
127 def test_get_auth_entity(self):
128 mgr = FakeMgr()
129 cephadm_services = self._get_services(mgr)
130
131 for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
132 assert "client.%s.id1" % (daemon_type) == \
133 cephadm_services[daemon_type].get_auth_entity("id1", "host")
134 assert "client.%s.id1" % (daemon_type) == \
135 cephadm_services[daemon_type].get_auth_entity("id1", "")
136 assert "client.%s.id1" % (daemon_type) == \
137 cephadm_services[daemon_type].get_auth_entity("id1")
138
139 assert "client.crash.host" == \
140 cephadm_services["crash"].get_auth_entity("id1", "host")
141 with pytest.raises(OrchestratorError):
142 cephadm_services["crash"].get_auth_entity("id1", "")
143 cephadm_services["crash"].get_auth_entity("id1")
144
145 assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
146 assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
147 assert "mon." == cephadm_services["mon"].get_auth_entity("id1")
148
149 assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "host")
150 assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "")
151 assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1")
152
153 for daemon_type in ["osd", "mds"]:
154 assert "%s.id1" % daemon_type == \
155 cephadm_services[daemon_type].get_auth_entity("id1", "host")
156 assert "%s.id1" % daemon_type == \
157 cephadm_services[daemon_type].get_auth_entity("id1", "")
158 assert "%s.id1" % daemon_type == \
159 cephadm_services[daemon_type].get_auth_entity("id1")
160
161 # services based on CephadmService shouldn't have get_auth_entity
162 with pytest.raises(AttributeError):
163 for daemon_type in ['grafana', 'alertmanager', 'prometheus', 'node-exporter', 'loki', 'promtail']:
164 cephadm_services[daemon_type].get_auth_entity("id1", "host")
165 cephadm_services[daemon_type].get_auth_entity("id1", "")
166 cephadm_services[daemon_type].get_auth_entity("id1")
167
168
169 class TestISCSIService:
170
171 mgr = FakeMgr()
172 iscsi_service = IscsiService(mgr)
173
174 iscsi_spec = IscsiServiceSpec(service_type='iscsi', service_id="a")
175 iscsi_spec.daemon_type = "iscsi"
176 iscsi_spec.daemon_id = "a"
177 iscsi_spec.spec = MagicMock()
178 iscsi_spec.spec.daemon_type = "iscsi"
179 iscsi_spec.spec.ssl_cert = ''
180 iscsi_spec.api_user = "user"
181 iscsi_spec.api_password = "password"
182 iscsi_spec.api_port = 5000
183 iscsi_spec.api_secure = False
184 iscsi_spec.ssl_cert = "cert"
185 iscsi_spec.ssl_key = "key"
186
187 mgr.spec_store = MagicMock()
188 mgr.spec_store.all_specs.get.return_value = iscsi_spec
189
190 def test_iscsi_client_caps(self):
191
192 iscsi_daemon_spec = CephadmDaemonDeploySpec(
193 host='host', daemon_id='a', service_name=self.iscsi_spec.service_name())
194
195 self.iscsi_service.prepare_create(iscsi_daemon_spec)
196
197 expected_caps = ['mon',
198 'profile rbd, allow command "osd blocklist", allow command "config-key get" with "key" prefix "iscsi/"',
199 'mgr', 'allow command "service status"',
200 'osd', 'allow rwx']
201
202 expected_call = call({'prefix': 'auth get-or-create',
203 'entity': 'client.iscsi.a',
204 'caps': expected_caps})
205 expected_call2 = call({'prefix': 'auth caps',
206 'entity': 'client.iscsi.a',
207 'caps': expected_caps})
208 expected_call3 = call({'prefix': 'auth get',
209 'entity': 'client.iscsi.a'})
210
211 assert expected_call in self.mgr.mon_command.mock_calls
212 assert expected_call2 in self.mgr.mon_command.mock_calls
213 assert expected_call3 in self.mgr.mon_command.mock_calls
214
215 @patch('cephadm.utils.resolve_ip')
216 def test_iscsi_dashboard_config(self, mock_resolve_ip):
217
218 self.mgr.check_mon_command = MagicMock()
219 self.mgr.check_mon_command.return_value = ('', '{"gateways": {}}', '')
220
221 # Case 1: use IPV4 address
222 id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
223 daemon_id="a", ip='192.168.1.1')
224 daemon_list = [id1]
225 mock_resolve_ip.return_value = '192.168.1.1'
226
227 self.iscsi_service.config_dashboard(daemon_list)
228
229 dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
230 'name': 'testhost1'},
231 'http://user:password@192.168.1.1:5000')
232
233 assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
234
235 # Case 2: use IPV6 address
236 self.mgr.check_mon_command.reset_mock()
237
238 id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
239 daemon_id="a", ip='FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')
240 mock_resolve_ip.return_value = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'
241
242 self.iscsi_service.config_dashboard(daemon_list)
243
244 dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
245 'name': 'testhost1'},
246 'http://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')
247
248 assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
249
250 # Case 3: IPV6 Address . Secure protocol
251 self.mgr.check_mon_command.reset_mock()
252
253 self.iscsi_spec.api_secure = True
254
255 self.iscsi_service.config_dashboard(daemon_list)
256
257 dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
258 'name': 'testhost1'},
259 'https://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')
260
261 assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
262
263 @patch("cephadm.serve.CephadmServe._run_cephadm")
264 @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
265 @patch("cephadm.services.iscsi.IscsiService.get_trusted_ips")
266 def test_iscsi_config(self, _get_trusted_ips, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):
267
268 iscsi_daemon_id = 'testpool.test.qwert'
269 trusted_ips = '1.1.1.1,2.2.2.2'
270 api_port = 3456
271 api_user = 'test-user'
272 api_password = 'test-password'
273 pool = 'testpool'
274 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
275 _get_name.return_value = iscsi_daemon_id
276 _get_trusted_ips.return_value = trusted_ips
277
278 iscsi_gateway_conf = f"""# This file is generated by cephadm.
279 [config]
280 cluster_client_name = client.iscsi.{iscsi_daemon_id}
281 pool = {pool}
282 trusted_ip_list = {trusted_ips}
283 minimum_gateways = 1
284 api_port = {api_port}
285 api_user = {api_user}
286 api_password = {api_password}
287 api_secure = False
288 log_to_stderr = True
289 log_to_stderr_prefix = debug
290 log_to_file = False"""
291
292 with with_host(cephadm_module, 'test'):
293 with with_service(cephadm_module, IscsiServiceSpec(service_id=pool,
294 api_port=api_port,
295 api_user=api_user,
296 api_password=api_password,
297 pool=pool,
298 trusted_ip_list=trusted_ips)):
299 _run_cephadm.assert_called_with(
300 'test',
301 f'iscsi.{iscsi_daemon_id}',
302 'deploy',
303 [
304 '--name', f'iscsi.{iscsi_daemon_id}',
305 '--meta-json', f'{"{"}"service_name": "iscsi.{pool}", "ports": [{api_port}], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null{"}"}',
306 '--config-json', '-', '--tcp-ports', '3456'
307 ],
308 stdin=json.dumps({"config": "", "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n", "files": {"iscsi-gateway.cfg": iscsi_gateway_conf}}),
309 image='')
310
311
312 class TestMonitoring:
313 def _get_config(self, url: str) -> str:
314 return f"""
315 # This file is generated by cephadm.
316 # See https://prometheus.io/docs/alerting/configuration/ for documentation.
317
318 global:
319 resolve_timeout: 5m
320 http_config:
321 tls_config:
322 insecure_skip_verify: true
323
324 route:
325 receiver: 'default'
326 routes:
327 - group_by: ['alertname']
328 group_wait: 10s
329 group_interval: 10s
330 repeat_interval: 1h
331 receiver: 'ceph-dashboard'
332
333 receivers:
334 - name: 'default'
335 webhook_configs:
336 - name: 'ceph-dashboard'
337 webhook_configs:
338 - url: '{url}/api/prometheus_receiver'
339 """
340
341 @pytest.mark.parametrize(
342 "dashboard_url,expected_yaml_url",
343 [
344 # loopback address
345 ("http://[::1]:8080", "http://localhost:8080"),
346 # IPv6
347 (
348 "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
349 "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
350 ),
351 # IPv6 to FQDN
352 (
353 "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080",
354 "http://mgr.fqdn.test:8080",
355 ),
356 # IPv4
357 (
358 "http://192.168.0.123:8080",
359 "http://192.168.0.123:8080",
360 ),
361 # IPv4 to FQDN
362 (
363 "http://192.168.0.123:8080",
364 "http://mgr.fqdn.test:8080",
365 ),
366 ],
367 )
368 @patch("cephadm.serve.CephadmServe._run_cephadm")
369 @patch("mgr_module.MgrModule.get")
370 @patch("socket.getfqdn")
371 def test_alertmanager_config(
372 self,
373 mock_getfqdn,
374 mock_get,
375 _run_cephadm,
376 cephadm_module: CephadmOrchestrator,
377 dashboard_url,
378 expected_yaml_url,
379 ):
380 _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
381 mock_get.return_value = {"services": {"dashboard": dashboard_url}}
382 purl = urllib.parse.urlparse(expected_yaml_url)
383 mock_getfqdn.return_value = purl.hostname
384
385 with with_host(cephadm_module, "test"):
386 with with_service(cephadm_module, AlertManagerSpec()):
387 y = dedent(self._get_config(expected_yaml_url)).lstrip()
388 _run_cephadm.assert_called_with(
389 "test",
390 "alertmanager.test",
391 "deploy",
392 [
393 "--name",
394 "alertmanager.test",
395 "--meta-json",
396 ('{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, '
397 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
398 "--config-json",
399 "-",
400 "--tcp-ports",
401 "9093 9094",
402 ],
403 stdin=json.dumps(
404 {"files": {"alertmanager.yml": y}, "peers": []}
405 ),
406 image="",
407 )
408
409 @patch("cephadm.serve.CephadmServe._run_cephadm")
410 @patch("socket.getfqdn")
411 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
412 @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password')
413 def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator):
414 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
415
416 fqdn = 'host1.test'
417 _get_fqdn.return_value = fqdn
418
419 def gen_cert(host, addr):
420 return ('mycert', 'mykey')
421
422 def get_root_cert():
423 return 'my_root_cert'
424
425 with with_host(cephadm_module, 'test'):
426 cephadm_module.secure_monitoring_stack = True
427 cephadm_module.alertmanager_web_password = 'fake_password'
428 cephadm_module.alertmanager_web_user = 'admin'
429 cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert)
430 cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert)
431 with with_service(cephadm_module, AlertManagerSpec()):
432
433 y = dedent(f"""
434 # This file is generated by cephadm.
435 # See https://prometheus.io/docs/alerting/configuration/ for documentation.
436
437 global:
438 resolve_timeout: 5m
439 http_config:
440 tls_config:
441 ca_file: root_cert.pem
442
443 route:
444 receiver: 'default'
445 routes:
446 - group_by: ['alertname']
447 group_wait: 10s
448 group_interval: 10s
449 repeat_interval: 1h
450 receiver: 'ceph-dashboard'
451
452 receivers:
453 - name: 'default'
454 webhook_configs:
455 - name: 'ceph-dashboard'
456 webhook_configs:
457 - url: 'http://{fqdn}:8080/api/prometheus_receiver'
458 """).lstrip()
459
460 web_config = dedent("""
461 tls_server_config:
462 cert_file: alertmanager.crt
463 key_file: alertmanager.key
464 basic_auth_users:
465 admin: fake_password""").lstrip()
466
467 _run_cephadm.assert_called_with(
468 'test',
469 'alertmanager.test',
470 'deploy',
471 [
472 '--name', 'alertmanager.test',
473 '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
474 '--config-json', '-', '--tcp-ports', '9093 9094'
475 ],
476 stdin=json.dumps({
477 "files": {
478 "alertmanager.yml": y,
479 'alertmanager.crt': 'mycert',
480 'alertmanager.key': 'mykey',
481 'web.yml': web_config,
482 'root_cert.pem': 'my_root_cert'
483 },
484 'peers': [],
485 'web_config': '/etc/alertmanager/web.yml'
486 }),
487 image='')
488
489 @patch("cephadm.serve.CephadmServe._run_cephadm")
490 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
491 def test_prometheus_config_security_disabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
492 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
493 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
494 with with_host(cephadm_module, 'test'):
495 with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
496 with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \
497 with_service(cephadm_module, s) as _, \
498 with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
499 with_service(cephadm_module, IngressSpec(service_id='ingress',
500 frontend_port=8089,
501 monitor_port=8999,
502 monitor_user='admin',
503 monitor_password='12345',
504 keepalived_password='12345',
505 virtual_ip="1.2.3.4/32",
506 backend_service='rgw.foo')) as _, \
507 with_service(cephadm_module, PrometheusSpec('prometheus')) as _:
508
509 y = dedent("""
510 # This file is generated by cephadm.
511 global:
512 scrape_interval: 10s
513 evaluation_interval: 10s
514 rule_files:
515 - /etc/prometheus/alerting/*
516
517 alerting:
518 alertmanagers:
519 - scheme: http
520 http_sd_configs:
521 - url: http://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
522
523 scrape_configs:
524 - job_name: 'ceph'
525 honor_labels: true
526 http_sd_configs:
527 - url: http://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
528
529 - job_name: 'node'
530 http_sd_configs:
531 - url: http://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
532
533 - job_name: 'haproxy'
534 http_sd_configs:
535 - url: http://[::1]:8765/sd/prometheus/sd-config?service=haproxy
536
537 - job_name: 'ceph-exporter'
538 honor_labels: true
539 http_sd_configs:
540 - url: http://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
541 """).lstrip()
542
543 _run_cephadm.assert_called_with(
544 'test',
545 'prometheus.test',
546 'deploy',
547 [
548 '--name', 'prometheus.test',
549 '--meta-json',
550 ('{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, '
551 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
552 '--config-json', '-',
553 '--tcp-ports', '9095'
554 ],
555 stdin=json.dumps({"files": {"prometheus.yml": y,
556 "/etc/prometheus/alerting/custom_alerts.yml": ""},
557 'retention_time': '15d',
558 'retention_size': '0'}),
559 image='')
560
561 @patch("cephadm.serve.CephadmServe._run_cephadm")
562 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
563 @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password')
564 def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
565 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
566 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
567
568 def gen_cert(host, addr):
569 return ('mycert', 'mykey')
570
571 with with_host(cephadm_module, 'test'):
572 cephadm_module.secure_monitoring_stack = True
573 cephadm_module.http_server.service_discovery.username = 'admin'
574 cephadm_module.http_server.service_discovery.password = 'fake_password'
575 cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(
576 side_effect=gen_cert)
577 with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
578 with_service(cephadm_module, s) as _, \
579 with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
580 with_service(cephadm_module, IngressSpec(service_id='ingress',
581 frontend_port=8089,
582 monitor_port=8999,
583 monitor_user='admin',
584 monitor_password='12345',
585 keepalived_password='12345',
586 virtual_ip="1.2.3.4/32",
587 backend_service='rgw.foo')) as _, \
588 with_service(cephadm_module, PrometheusSpec('prometheus')) as _:
589
590 web_config = dedent("""
591 tls_server_config:
592 cert_file: prometheus.crt
593 key_file: prometheus.key
594 basic_auth_users:
595 admin: fake_password""").lstrip()
596
597 y = dedent("""
598 # This file is generated by cephadm.
599 global:
600 scrape_interval: 10s
601 evaluation_interval: 10s
602 rule_files:
603 - /etc/prometheus/alerting/*
604
605 alerting:
606 alertmanagers:
607 - scheme: https
608 basic_auth:
609 username: admin
610 password: admin
611 tls_config:
612 ca_file: root_cert.pem
613 http_sd_configs:
614 - url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
615 basic_auth:
616 username: admin
617 password: fake_password
618 tls_config:
619 ca_file: root_cert.pem
620
621 scrape_configs:
622 - job_name: 'ceph'
623 scheme: https
624 tls_config:
625 ca_file: mgr_prometheus_cert.pem
626 honor_labels: true
627 http_sd_configs:
628 - url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
629 basic_auth:
630 username: admin
631 password: fake_password
632 tls_config:
633 ca_file: root_cert.pem
634
635 - job_name: 'node'
636 scheme: https
637 tls_config:
638 ca_file: root_cert.pem
639 http_sd_configs:
640 - url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
641 basic_auth:
642 username: admin
643 password: fake_password
644 tls_config:
645 ca_file: root_cert.pem
646
647 - job_name: 'haproxy'
648 scheme: https
649 tls_config:
650 ca_file: root_cert.pem
651 http_sd_configs:
652 - url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy
653 basic_auth:
654 username: admin
655 password: fake_password
656 tls_config:
657 ca_file: root_cert.pem
658
659 - job_name: 'ceph-exporter'
660 honor_labels: true
661 scheme: https
662 tls_config:
663 ca_file: root_cert.pem
664 http_sd_configs:
665 - url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
666 basic_auth:
667 username: admin
668 password: fake_password
669 tls_config:
670 ca_file: root_cert.pem
671 """).lstrip()
672
673 _run_cephadm.assert_called_with(
674 'test',
675 'prometheus.test',
676 'deploy',
677 [
678 '--name', 'prometheus.test',
679 '--meta-json',
680 '{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
681 '--config-json', '-',
682 '--tcp-ports', '9095'
683 ],
684 stdin=json.dumps({
685 'files': {
686 'prometheus.yml': y,
687 'root_cert.pem': '',
688 'mgr_prometheus_cert.pem': '',
689 'web.yml': web_config,
690 'prometheus.crt': 'mycert',
691 'prometheus.key': 'mykey',
692 "/etc/prometheus/alerting/custom_alerts.yml": "",
693 },
694 'retention_time': '15d',
695 'retention_size': '0',
696 'web_config': '/etc/prometheus/web.yml'}),
697 image=''
698 )
699
700 @patch("cephadm.serve.CephadmServe._run_cephadm")
701 def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
702 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
703
704 with with_host(cephadm_module, 'test'):
705 with with_service(cephadm_module, MonitoringSpec('loki')) as _:
706
707 y = dedent("""
708 # This file is generated by cephadm.
709 auth_enabled: false
710
711 server:
712 http_listen_port: 3100
713 grpc_listen_port: 8080
714
715 common:
716 path_prefix: /tmp/loki
717 storage:
718 filesystem:
719 chunks_directory: /tmp/loki/chunks
720 rules_directory: /tmp/loki/rules
721 replication_factor: 1
722 ring:
723 instance_addr: 127.0.0.1
724 kvstore:
725 store: inmemory
726
727 schema_config:
728 configs:
729 - from: 2020-10-24
730 store: boltdb-shipper
731 object_store: filesystem
732 schema: v11
733 index:
734 prefix: index_
735 period: 24h""").lstrip()
736
737 _run_cephadm.assert_called_with(
738 'test',
739 'loki.test',
740 'deploy',
741 [
742 '--name', 'loki.test',
743 '--meta-json',
744 ('{"service_name": "loki", "ports": [3100], "ip": null, "deployed_by": [], "rank": null, '
745 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
746 '--config-json', '-',
747 '--tcp-ports', '3100'
748 ],
749 stdin=json.dumps({"files": {"loki.yml": y}}),
750 image='')
751
752 @patch("cephadm.serve.CephadmServe._run_cephadm")
753 def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
754 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
755
756 with with_host(cephadm_module, 'test'):
757 with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
758 with_service(cephadm_module, MonitoringSpec('promtail')) as _:
759
760 y = dedent("""
761 # This file is generated by cephadm.
762 server:
763 http_listen_port: 9080
764 grpc_listen_port: 0
765
766 positions:
767 filename: /tmp/positions.yaml
768
769 clients:
770 - url: http://:3100/loki/api/v1/push
771
772 scrape_configs:
773 - job_name: system
774 static_configs:
775 - labels:
776 job: Cluster Logs
777 __path__: /var/log/ceph/**/*.log""").lstrip()
778
779 _run_cephadm.assert_called_with(
780 'test',
781 'promtail.test',
782 'deploy',
783 [
784 '--name', 'promtail.test',
785 '--meta-json',
786 ('{"service_name": "promtail", "ports": [9080], "ip": null, "deployed_by": [], "rank": null, '
787 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
788 '--config-json', '-',
789 '--tcp-ports', '9080'
790 ],
791 stdin=json.dumps({"files": {"promtail.yml": y}}),
792 image='')
793
794 @patch("cephadm.serve.CephadmServe._run_cephadm")
795 @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
796 @patch("cephadm.services.monitoring.verify_tls", lambda *_: None)
797 def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
798 _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
799
800 with with_host(cephadm_module, "test"):
801 cephadm_module.set_store("test/grafana_crt", grafana_cert)
802 cephadm_module.set_store("test/grafana_key", grafana_key)
803 with with_service(
804 cephadm_module, PrometheusSpec("prometheus")
805 ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service(
806 cephadm_module, GrafanaSpec("grafana")
807 ) as _:
808 files = {
809 'grafana.ini': dedent("""
810 # This file is generated by cephadm.
811 [users]
812 default_theme = light
813 [auth.anonymous]
814 enabled = true
815 org_name = 'Main Org.'
816 org_role = 'Viewer'
817 [server]
818 domain = 'bootstrap.storage.lab'
819 protocol = https
820 cert_file = /etc/grafana/certs/cert_file
821 cert_key = /etc/grafana/certs/cert_key
822 http_port = 3000
823 http_addr =
824 [snapshots]
825 external_enabled = false
826 [security]
827 disable_initial_admin_creation = true
828 cookie_secure = true
829 cookie_samesite = none
830 allow_embedding = true""").lstrip(), # noqa: W291
831 'provisioning/datasources/ceph-dashboard.yml': dedent("""
832 # This file is generated by cephadm.
833 apiVersion: 1
834
835 deleteDatasources:
836 - name: 'Dashboard1'
837 orgId: 1
838
839 datasources:
840 - name: 'Dashboard1'
841 type: 'prometheus'
842 access: 'proxy'
843 orgId: 1
844 url: 'http://[1::4]:9095'
845 basicAuth: false
846 isDefault: true
847 editable: false
848
849 - name: 'Loki'
850 type: 'loki'
851 access: 'proxy'
852 url: ''
853 basicAuth: false
854 isDefault: false
855 editable: false""").lstrip(),
856 'certs/cert_file': dedent(f"""
857 # generated by cephadm\n{grafana_cert}""").lstrip(),
858 'certs/cert_key': dedent(f"""
859 # generated by cephadm\n{grafana_key}""").lstrip(),
860 }
861
862 _run_cephadm.assert_called_with(
863 'test',
864 'grafana.test',
865 'deploy',
866 [
867 '--name', 'grafana.test',
868 '--meta-json',
869 ('{"service_name": "grafana", "ports": [3000], "ip": null, "deployed_by": [], "rank": null, '
870 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
871 '--config-json', '-', '--tcp-ports', '3000'],
872 stdin=json.dumps({"files": files}),
873 image='')
874
875 @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
876 def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
877 with with_host(cephadm_module, 'test'):
878 with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
879 with_service(cephadm_module, GrafanaSpec(initial_admin_password='secure')):
880 out = cephadm_module.cephadm_services['grafana'].generate_config(
881 CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
882 assert out == (
883 {
884 'files':
885 {
886 'grafana.ini':
887 '# This file is generated by cephadm.\n'
888 '[users]\n'
889 ' default_theme = light\n'
890 '[auth.anonymous]\n'
891 ' enabled = true\n'
892 " org_name = 'Main Org.'\n"
893 " org_role = 'Viewer'\n"
894 '[server]\n'
895 " domain = 'bootstrap.storage.lab'\n"
896 ' protocol = https\n'
897 ' cert_file = /etc/grafana/certs/cert_file\n'
898 ' cert_key = /etc/grafana/certs/cert_key\n'
899 ' http_port = 3000\n'
900 ' http_addr = \n'
901 '[snapshots]\n'
902 ' external_enabled = false\n'
903 '[security]\n'
904 ' admin_user = admin\n'
905 ' admin_password = secure\n'
906 ' cookie_secure = true\n'
907 ' cookie_samesite = none\n'
908 ' allow_embedding = true',
909 'provisioning/datasources/ceph-dashboard.yml':
910 "# This file is generated by cephadm.\n"
911 "apiVersion: 1\n\n"
912 'deleteDatasources:\n\n'
913 'datasources:\n\n'
914 " - name: 'Loki'\n"
915 " type: 'loki'\n"
916 " access: 'proxy'\n"
917 " url: ''\n"
918 ' basicAuth: false\n'
919 ' isDefault: false\n'
920 ' editable: false',
921 'certs/cert_file': ANY,
922 'certs/cert_key': ANY}}, ['secure_monitoring_stack:False'])
923
924 @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
925 def test_grafana_no_anon_access(self, cephadm_module: CephadmOrchestrator):
926 # with anonymous_access set to False, expecting the [auth.anonymous] section
927 # to not be present in the grafana config. Note that we require an initial_admin_password
928 # to be provided when anonymous_access is False
929 with with_host(cephadm_module, 'test'):
930 with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
931 with_service(cephadm_module, GrafanaSpec(anonymous_access=False, initial_admin_password='secure')):
932 out = cephadm_module.cephadm_services['grafana'].generate_config(
933 CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
934 assert out == (
935 {
936 'files':
937 {
938 'grafana.ini':
939 '# This file is generated by cephadm.\n'
940 '[users]\n'
941 ' default_theme = light\n'
942 '[server]\n'
943 " domain = 'bootstrap.storage.lab'\n"
944 ' protocol = https\n'
945 ' cert_file = /etc/grafana/certs/cert_file\n'
946 ' cert_key = /etc/grafana/certs/cert_key\n'
947 ' http_port = 3000\n'
948 ' http_addr = \n'
949 '[snapshots]\n'
950 ' external_enabled = false\n'
951 '[security]\n'
952 ' admin_user = admin\n'
953 ' admin_password = secure\n'
954 ' cookie_secure = true\n'
955 ' cookie_samesite = none\n'
956 ' allow_embedding = true',
957 'provisioning/datasources/ceph-dashboard.yml':
958 "# This file is generated by cephadm.\n"
959 "apiVersion: 1\n\n"
960 'deleteDatasources:\n\n'
961 'datasources:\n\n'
962 " - name: 'Loki'\n"
963 " type: 'loki'\n"
964 " access: 'proxy'\n"
965 " url: ''\n"
966 ' basicAuth: false\n'
967 ' isDefault: false\n'
968 ' editable: false',
969 'certs/cert_file': ANY,
970 'certs/cert_key': ANY}}, ['secure_monitoring_stack:False'])
971
972 @patch("cephadm.serve.CephadmServe._run_cephadm")
973 def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
974 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
975
976 with with_host(cephadm_module, 'test'):
977
978 yaml_str = """service_type: alertmanager
979 service_name: alertmanager
980 placement:
981 count: 1
982 spec:
983 port: 4200
984 """
985 yaml_file = yaml.safe_load(yaml_str)
986 spec = ServiceSpec.from_json(yaml_file)
987
988 with patch("cephadm.services.monitoring.AlertmanagerService.generate_config", return_value=({}, [])):
989 with with_service(cephadm_module, spec):
990
991 CephadmServe(cephadm_module)._check_daemons()
992
993 _run_cephadm.assert_called_with(
994 'test', 'alertmanager.test', 'deploy', [
995 '--name', 'alertmanager.test',
996 '--meta-json', ('{"service_name": "alertmanager", "ports": [4200, 9094], "ip": null, "deployed_by": [], "rank": null, '
997 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
998 '--config-json', '-',
999 '--tcp-ports', '4200 9094',
1000 '--reconfig'
1001 ],
1002 stdin='{}',
1003 image='')
1004
1005
1006 class TestRGWService:
1007
1008 @pytest.mark.parametrize(
1009 "frontend, ssl, extra_args, expected",
1010 [
1011 ('beast', False, ['tcp_nodelay=1'],
1012 'beast endpoint=[fd00:fd00:fd00:3000::1]:80 tcp_nodelay=1'),
1013 ('beast', True, ['tcp_nodelay=0', 'max_header_size=65536'],
1014 'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo tcp_nodelay=0 max_header_size=65536'),
1015 ('civetweb', False, [], 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
1016 ('civetweb', True, None,
1017 'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'),
1018 ]
1019 )
1020 @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
1021 def test_rgw_update(self, frontend, ssl, extra_args, expected, cephadm_module: CephadmOrchestrator):
1022 with with_host(cephadm_module, 'host1'):
1023 cephadm_module.cache.update_host_networks('host1', {
1024 'fd00:fd00:fd00:3000::/64': {
1025 'if0': ['fd00:fd00:fd00:3000::1']
1026 }
1027 })
1028 s = RGWSpec(service_id="foo",
1029 networks=['fd00:fd00:fd00:3000::/64'],
1030 ssl=ssl,
1031 rgw_frontend_type=frontend,
1032 rgw_frontend_extra_args=extra_args)
1033 with with_service(cephadm_module, s) as dds:
1034 _, f, _ = cephadm_module.check_mon_command({
1035 'prefix': 'config get',
1036 'who': f'client.{dds[0]}',
1037 'key': 'rgw_frontends',
1038 })
1039 assert f == expected
1040
1041
1042 class TestMonService:
1043
1044 def test_set_crush_locations(self, cephadm_module: CephadmOrchestrator):
1045 mgr = FakeMgr()
1046 mon_service = MonService(mgr)
1047 mon_spec = ServiceSpec(service_type='mon', crush_locations={'vm-00': ['datacenter=a', 'rack=1'], 'vm-01': ['datacenter=a'], 'vm-02': ['datacenter=b', 'rack=3']})
1048
1049 mon_daemons = [
1050 DaemonDescription(daemon_type='mon', daemon_id='vm-00', hostname='vm-00'),
1051 DaemonDescription(daemon_type='mon', daemon_id='vm-01', hostname='vm-01'),
1052 DaemonDescription(daemon_type='mon', daemon_id='vm-02', hostname='vm-02')
1053 ]
1054 mon_service.set_crush_locations(mon_daemons, mon_spec)
1055 assert 'vm-00' in mgr.set_mon_crush_locations
1056 assert mgr.set_mon_crush_locations['vm-00'] == ['datacenter=a', 'rack=1']
1057 assert 'vm-01' in mgr.set_mon_crush_locations
1058 assert mgr.set_mon_crush_locations['vm-01'] == ['datacenter=a']
1059 assert 'vm-02' in mgr.set_mon_crush_locations
1060 assert mgr.set_mon_crush_locations['vm-02'] == ['datacenter=b', 'rack=3']
1061
1062
1063 class TestSNMPGateway:
1064
1065 @patch("cephadm.serve.CephadmServe._run_cephadm")
1066 def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1067 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1068
1069 spec = SNMPGatewaySpec(
1070 snmp_version='V2c',
1071 snmp_destination='192.168.1.1:162',
1072 credentials={
1073 'snmp_community': 'public'
1074 })
1075
1076 config = {
1077 "destination": spec.snmp_destination,
1078 "snmp_version": spec.snmp_version,
1079 "snmp_community": spec.credentials.get('snmp_community')
1080 }
1081
1082 with with_host(cephadm_module, 'test'):
1083 with with_service(cephadm_module, spec):
1084 _run_cephadm.assert_called_with(
1085 'test',
1086 'snmp-gateway.test',
1087 'deploy',
1088 [
1089 '--name', 'snmp-gateway.test',
1090 '--meta-json',
1091 ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
1092 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1093 '--config-json', '-',
1094 '--tcp-ports', '9464'
1095 ],
1096 stdin=json.dumps(config),
1097 image=''
1098 )
1099
1100 @patch("cephadm.serve.CephadmServe._run_cephadm")
1101 def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1102 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1103
1104 spec = SNMPGatewaySpec(
1105 snmp_version='V2c',
1106 snmp_destination='192.168.1.1:162',
1107 credentials={
1108 'snmp_community': 'public'
1109 },
1110 port=9465)
1111
1112 config = {
1113 "destination": spec.snmp_destination,
1114 "snmp_version": spec.snmp_version,
1115 "snmp_community": spec.credentials.get('snmp_community')
1116 }
1117
1118 with with_host(cephadm_module, 'test'):
1119 with with_service(cephadm_module, spec):
1120 _run_cephadm.assert_called_with(
1121 'test',
1122 'snmp-gateway.test',
1123 'deploy',
1124 [
1125 '--name', 'snmp-gateway.test',
1126 '--meta-json',
1127 ('{"service_name": "snmp-gateway", "ports": [9465], "ip": null, "deployed_by": [], "rank": null, '
1128 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1129 '--config-json', '-',
1130 '--tcp-ports', '9465'
1131 ],
1132 stdin=json.dumps(config),
1133 image=''
1134 )
1135
1136 @patch("cephadm.serve.CephadmServe._run_cephadm")
1137 def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1138 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1139
1140 spec = SNMPGatewaySpec(
1141 snmp_version='V3',
1142 snmp_destination='192.168.1.1:162',
1143 engine_id='8000C53F00000000',
1144 credentials={
1145 'snmp_v3_auth_username': 'myuser',
1146 'snmp_v3_auth_password': 'mypassword'
1147 })
1148
1149 config = {
1150 'destination': spec.snmp_destination,
1151 'snmp_version': spec.snmp_version,
1152 'snmp_v3_auth_protocol': 'SHA',
1153 'snmp_v3_auth_username': 'myuser',
1154 'snmp_v3_auth_password': 'mypassword',
1155 'snmp_v3_engine_id': '8000C53F00000000'
1156 }
1157
1158 with with_host(cephadm_module, 'test'):
1159 with with_service(cephadm_module, spec):
1160 _run_cephadm.assert_called_with(
1161 'test',
1162 'snmp-gateway.test',
1163 'deploy',
1164 [
1165 '--name', 'snmp-gateway.test',
1166 '--meta-json',
1167 ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
1168 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1169 '--config-json', '-',
1170 '--tcp-ports', '9464'
1171 ],
1172 stdin=json.dumps(config),
1173 image=''
1174 )
1175
1176 @patch("cephadm.serve.CephadmServe._run_cephadm")
1177 def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1178 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1179
1180 spec = SNMPGatewaySpec(
1181 snmp_version='V3',
1182 snmp_destination='192.168.1.1:162',
1183 engine_id='8000C53F00000000',
1184 auth_protocol='MD5',
1185 privacy_protocol='AES',
1186 credentials={
1187 'snmp_v3_auth_username': 'myuser',
1188 'snmp_v3_auth_password': 'mypassword',
1189 'snmp_v3_priv_password': 'mysecret',
1190 })
1191
1192 config = {
1193 'destination': spec.snmp_destination,
1194 'snmp_version': spec.snmp_version,
1195 'snmp_v3_auth_protocol': 'MD5',
1196 'snmp_v3_auth_username': spec.credentials.get('snmp_v3_auth_username'),
1197 'snmp_v3_auth_password': spec.credentials.get('snmp_v3_auth_password'),
1198 'snmp_v3_engine_id': '8000C53F00000000',
1199 'snmp_v3_priv_protocol': spec.privacy_protocol,
1200 'snmp_v3_priv_password': spec.credentials.get('snmp_v3_priv_password'),
1201 }
1202
1203 with with_host(cephadm_module, 'test'):
1204 with with_service(cephadm_module, spec):
1205 _run_cephadm.assert_called_with(
1206 'test',
1207 'snmp-gateway.test',
1208 'deploy',
1209 [
1210 '--name', 'snmp-gateway.test',
1211 '--meta-json',
1212 ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
1213 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1214 '--config-json', '-',
1215 '--tcp-ports', '9464'
1216 ],
1217 stdin=json.dumps(config),
1218 image=''
1219 )
1220
1221
1222 class TestIngressService:
1223
1224 @patch("cephadm.inventory.Inventory.get_addr")
1225 @patch("cephadm.utils.resolve_ip")
1226 @patch("cephadm.inventory.HostCache.get_daemons_by_service")
1227 @patch("cephadm.serve.CephadmServe._run_cephadm")
1228 def test_ingress_config_nfs_multiple_nfs_same_rank(self, _run_cephadm, _get_daemons_by_service, _resolve_ip, _get_addr, cephadm_module: CephadmOrchestrator):
1229 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1230
1231 def fake_resolve_ip(hostname: str) -> str:
1232 if hostname == 'host1':
1233 return '192.168.122.111'
1234 elif hostname == 'host2':
1235 return '192.168.122.222'
1236 else:
1237 return 'xxx.xxx.xxx.xxx'
1238 _resolve_ip.side_effect = fake_resolve_ip
1239
1240 def fake_get_addr(hostname: str) -> str:
1241 return hostname
1242 _get_addr.side_effect = fake_get_addr
1243
1244 nfs_service = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1, hosts=['host1', 'host2']),
1245 port=12049)
1246
1247 ispec = IngressSpec(service_type='ingress',
1248 service_id='nfs.foo',
1249 backend_service='nfs.foo',
1250 frontend_port=2049,
1251 monitor_port=9049,
1252 virtual_ip='192.168.122.100/24',
1253 monitor_user='admin',
1254 monitor_password='12345',
1255 keepalived_password='12345')
1256
1257 cephadm_module.spec_store._specs = {
1258 'nfs.foo': nfs_service,
1259 'ingress.nfs.foo': ispec
1260 }
1261 cephadm_module.spec_store.spec_created = {
1262 'nfs.foo': datetime_now(),
1263 'ingress.nfs.foo': datetime_now()
1264 }
1265
1266 # in both test cases we'll do here, we want only the ip
1267 # for the host1 nfs daemon as we'll end up giving that
1268 # one higher rank_generation but the same rank as the one
1269 # on host2
1270 haproxy_expected_conf = {
1271 'files':
1272 {
1273 'haproxy.cfg':
1274 '# This file is generated by cephadm.\n'
1275 'global\n'
1276 ' log 127.0.0.1 local2\n'
1277 ' chroot /var/lib/haproxy\n'
1278 ' pidfile /var/lib/haproxy/haproxy.pid\n'
1279 ' maxconn 8000\n'
1280 ' daemon\n'
1281 ' stats socket /var/lib/haproxy/stats\n\n'
1282 'defaults\n'
1283 ' mode tcp\n'
1284 ' log global\n'
1285 ' timeout queue 1m\n'
1286 ' timeout connect 10s\n'
1287 ' timeout client 1m\n'
1288 ' timeout server 1m\n'
1289 ' timeout check 10s\n'
1290 ' maxconn 8000\n\n'
1291 'frontend stats\n'
1292 ' mode http\n'
1293 ' bind 192.168.122.100:9049\n'
1294 ' bind host1:9049\n'
1295 ' stats enable\n'
1296 ' stats uri /stats\n'
1297 ' stats refresh 10s\n'
1298 ' stats auth admin:12345\n'
1299 ' http-request use-service prometheus-exporter if { path /metrics }\n'
1300 ' monitor-uri /health\n\n'
1301 'frontend frontend\n'
1302 ' bind 192.168.122.100:2049\n'
1303 ' default_backend backend\n\n'
1304 'backend backend\n'
1305 ' mode tcp\n'
1306 ' balance source\n'
1307 ' hash-type consistent\n'
1308 ' server nfs.foo.0 192.168.122.111:12049\n'
1309 }
1310 }
1311
1312 # verify we get the same cfg regardless of the order in which the nfs daemons are returned
1313 # in this case both nfs are rank 0, so it should only take the one with rank_generation 1 a.k.a
1314 # the one on host1
1315 nfs_daemons = [
1316 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049]),
1317 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049])
1318 ]
1319 _get_daemons_by_service.return_value = nfs_daemons
1320
1321 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1322 CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))
1323
1324 assert haproxy_generated_conf[0] == haproxy_expected_conf
1325
1326 # swapping order now, should still pick out the one with the higher rank_generation
1327 # in this case both nfs are rank 0, so it should only take the one with rank_generation 1 a.k.a
1328 # the one on host1
1329 nfs_daemons = [
1330 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049]),
1331 DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049])
1332 ]
1333 _get_daemons_by_service.return_value = nfs_daemons
1334
1335 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1336 CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))
1337
1338 assert haproxy_generated_conf[0] == haproxy_expected_conf
1339
1340 @patch("cephadm.serve.CephadmServe._run_cephadm")
1341 def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1342 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1343
1344 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
1345 cephadm_module.cache.update_host_networks('test', {
1346 '1.2.3.0/24': {
1347 'if0': ['1.2.3.4/32']
1348 }
1349 })
1350
1351 # the ingress backend
1352 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
1353 rgw_frontend_type='beast')
1354
1355 ispec = IngressSpec(service_type='ingress',
1356 service_id='test',
1357 backend_service='rgw.foo',
1358 frontend_port=8089,
1359 monitor_port=8999,
1360 monitor_user='admin',
1361 monitor_password='12345',
1362 keepalived_password='12345',
1363 virtual_interface_networks=['1.2.3.0/24'],
1364 virtual_ip="1.2.3.4/32")
1365 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
1366 # generate the keepalived conf based on the specified spec
1367 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
1368 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1369
1370 keepalived_expected_conf = {
1371 'files':
1372 {
1373 'keepalived.conf':
1374 '# This file is generated by cephadm.\n'
1375 'vrrp_script check_backend {\n '
1376 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
1377 'weight -20\n '
1378 'interval 2\n '
1379 'rise 2\n '
1380 'fall 2\n}\n\n'
1381 'vrrp_instance VI_0 {\n '
1382 'state MASTER\n '
1383 'priority 100\n '
1384 'interface if0\n '
1385 'virtual_router_id 50\n '
1386 'advert_int 1\n '
1387 'authentication {\n '
1388 'auth_type PASS\n '
1389 'auth_pass 12345\n '
1390 '}\n '
1391 'unicast_src_ip 1.2.3.7\n '
1392 'unicast_peer {\n '
1393 '}\n '
1394 'virtual_ipaddress {\n '
1395 '1.2.3.4/32 dev if0\n '
1396 '}\n '
1397 'track_script {\n '
1398 'check_backend\n }\n'
1399 '}\n'
1400 }
1401 }
1402
1403 # check keepalived config
1404 assert keepalived_generated_conf[0] == keepalived_expected_conf
1405
1406 # generate the haproxy conf based on the specified spec
1407 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1408 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1409
1410 haproxy_expected_conf = {
1411 'files':
1412 {
1413 'haproxy.cfg':
1414 '# This file is generated by cephadm.'
1415 '\nglobal\n log '
1416 '127.0.0.1 local2\n '
1417 'chroot /var/lib/haproxy\n '
1418 'pidfile /var/lib/haproxy/haproxy.pid\n '
1419 'maxconn 8000\n '
1420 'daemon\n '
1421 'stats socket /var/lib/haproxy/stats\n'
1422 '\ndefaults\n '
1423 'mode http\n '
1424 'log global\n '
1425 'option httplog\n '
1426 'option dontlognull\n '
1427 'option http-server-close\n '
1428 'option forwardfor except 127.0.0.0/8\n '
1429 'option redispatch\n '
1430 'retries 3\n '
1431 'timeout queue 20s\n '
1432 'timeout connect 5s\n '
1433 'timeout http-request 1s\n '
1434 'timeout http-keep-alive 5s\n '
1435 'timeout client 30s\n '
1436 'timeout server 30s\n '
1437 'timeout check 5s\n '
1438 'maxconn 8000\n'
1439 '\nfrontend stats\n '
1440 'mode http\n '
1441 'bind 1.2.3.4:8999\n '
1442 'bind 1.2.3.7:8999\n '
1443 'stats enable\n '
1444 'stats uri /stats\n '
1445 'stats refresh 10s\n '
1446 'stats auth admin:12345\n '
1447 'http-request use-service prometheus-exporter if { path /metrics }\n '
1448 'monitor-uri /health\n'
1449 '\nfrontend frontend\n '
1450 'bind 1.2.3.4:8089\n '
1451 'default_backend backend\n\n'
1452 'backend backend\n '
1453 'option forwardfor\n '
1454 'balance static-rr\n '
1455 'option httpchk HEAD / HTTP/1.0\n '
1456 'server '
1457 + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n'
1458 }
1459 }
1460
1461 assert haproxy_generated_conf[0] == haproxy_expected_conf
1462
1463 @patch("cephadm.serve.CephadmServe._run_cephadm")
1464 def test_ingress_config_ssl_rgw(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1465 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1466
1467 with with_host(cephadm_module, 'test'):
1468 cephadm_module.cache.update_host_networks('test', {
1469 '1.2.3.0/24': {
1470 'if0': ['1.2.3.4/32']
1471 }
1472 })
1473
1474 # the ingress backend
1475 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
1476 rgw_frontend_type='beast', rgw_frontend_port=443, ssl=True)
1477
1478 ispec = IngressSpec(service_type='ingress',
1479 service_id='test',
1480 backend_service='rgw.foo',
1481 frontend_port=8089,
1482 monitor_port=8999,
1483 monitor_user='admin',
1484 monitor_password='12345',
1485 keepalived_password='12345',
1486 virtual_interface_networks=['1.2.3.0/24'],
1487 virtual_ip="1.2.3.4/32")
1488 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
1489 # generate the keepalived conf based on the specified spec
1490 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
1491 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1492
1493 keepalived_expected_conf = {
1494 'files':
1495 {
1496 'keepalived.conf':
1497 '# This file is generated by cephadm.\n'
1498 'vrrp_script check_backend {\n '
1499 'script "/usr/bin/curl http://[1::4]:8999/health"\n '
1500 'weight -20\n '
1501 'interval 2\n '
1502 'rise 2\n '
1503 'fall 2\n}\n\n'
1504 'vrrp_instance VI_0 {\n '
1505 'state MASTER\n '
1506 'priority 100\n '
1507 'interface if0\n '
1508 'virtual_router_id 50\n '
1509 'advert_int 1\n '
1510 'authentication {\n '
1511 'auth_type PASS\n '
1512 'auth_pass 12345\n '
1513 '}\n '
1514 'unicast_src_ip 1::4\n '
1515 'unicast_peer {\n '
1516 '}\n '
1517 'virtual_ipaddress {\n '
1518 '1.2.3.4/32 dev if0\n '
1519 '}\n '
1520 'track_script {\n '
1521 'check_backend\n }\n'
1522 '}\n'
1523 }
1524 }
1525
1526 # check keepalived config
1527 assert keepalived_generated_conf[0] == keepalived_expected_conf
1528
1529 # generate the haproxy conf based on the specified spec
1530 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1531 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1532
1533 haproxy_expected_conf = {
1534 'files':
1535 {
1536 'haproxy.cfg':
1537 '# This file is generated by cephadm.'
1538 '\nglobal\n log '
1539 '127.0.0.1 local2\n '
1540 'chroot /var/lib/haproxy\n '
1541 'pidfile /var/lib/haproxy/haproxy.pid\n '
1542 'maxconn 8000\n '
1543 'daemon\n '
1544 'stats socket /var/lib/haproxy/stats\n'
1545 '\ndefaults\n '
1546 'mode http\n '
1547 'log global\n '
1548 'option httplog\n '
1549 'option dontlognull\n '
1550 'option http-server-close\n '
1551 'option forwardfor except 127.0.0.0/8\n '
1552 'option redispatch\n '
1553 'retries 3\n '
1554 'timeout queue 20s\n '
1555 'timeout connect 5s\n '
1556 'timeout http-request 1s\n '
1557 'timeout http-keep-alive 5s\n '
1558 'timeout client 30s\n '
1559 'timeout server 30s\n '
1560 'timeout check 5s\n '
1561 'maxconn 8000\n'
1562 '\nfrontend stats\n '
1563 'mode http\n '
1564 'bind 1.2.3.4:8999\n '
1565 'bind 1::4:8999\n '
1566 'stats enable\n '
1567 'stats uri /stats\n '
1568 'stats refresh 10s\n '
1569 'stats auth admin:12345\n '
1570 'http-request use-service prometheus-exporter if { path /metrics }\n '
1571 'monitor-uri /health\n'
1572 '\nfrontend frontend\n '
1573 'bind 1.2.3.4:8089\n '
1574 'default_backend backend\n\n'
1575 'backend backend\n '
1576 'option forwardfor\n '
1577 'default-server ssl\n '
1578 'default-server verify none\n '
1579 'balance static-rr\n '
1580 'option httpchk HEAD / HTTP/1.0\n '
1581 'server '
1582 + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100\n'
1583 }
1584 }
1585
1586 assert haproxy_generated_conf[0] == haproxy_expected_conf
1587
1588 @patch("cephadm.serve.CephadmServe._run_cephadm")
1589 def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1590 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1591
1592 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
1593 cephadm_module.cache.update_host_networks('test', {
1594 '1.2.3.0/24': {
1595 'if0': ['1.2.3.4/32']
1596 }
1597 })
1598
1599 # Check the ingress with multiple VIPs
1600 s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
1601 rgw_frontend_type='beast')
1602
1603 ispec = IngressSpec(service_type='ingress',
1604 service_id='test',
1605 backend_service='rgw.foo',
1606 frontend_port=8089,
1607 monitor_port=8999,
1608 monitor_user='admin',
1609 monitor_password='12345',
1610 keepalived_password='12345',
1611 virtual_interface_networks=['1.2.3.0/24'],
1612 virtual_ips_list=["1.2.3.4/32"])
1613 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
1614 # generate the keepalived conf based on the specified spec
1615 # Test with only 1 IP on the list, as it will fail with more VIPS but only one host.
1616 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
1617 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1618
1619 keepalived_expected_conf = {
1620 'files':
1621 {
1622 'keepalived.conf':
1623 '# This file is generated by cephadm.\n'
1624 'vrrp_script check_backend {\n '
1625 'script "/usr/bin/curl http://1.2.3.7:8999/health"\n '
1626 'weight -20\n '
1627 'interval 2\n '
1628 'rise 2\n '
1629 'fall 2\n}\n\n'
1630 'vrrp_instance VI_0 {\n '
1631 'state MASTER\n '
1632 'priority 100\n '
1633 'interface if0\n '
1634 'virtual_router_id 50\n '
1635 'advert_int 1\n '
1636 'authentication {\n '
1637 'auth_type PASS\n '
1638 'auth_pass 12345\n '
1639 '}\n '
1640 'unicast_src_ip 1.2.3.7\n '
1641 'unicast_peer {\n '
1642 '}\n '
1643 'virtual_ipaddress {\n '
1644 '1.2.3.4/32 dev if0\n '
1645 '}\n '
1646 'track_script {\n '
1647 'check_backend\n }\n'
1648 '}\n'
1649 }
1650 }
1651
1652 # check keepalived config
1653 assert keepalived_generated_conf[0] == keepalived_expected_conf
1654
1655 # generate the haproxy conf based on the specified spec
1656 haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
1657 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1658
1659 haproxy_expected_conf = {
1660 'files':
1661 {
1662 'haproxy.cfg':
1663 '# This file is generated by cephadm.'
1664 '\nglobal\n log '
1665 '127.0.0.1 local2\n '
1666 'chroot /var/lib/haproxy\n '
1667 'pidfile /var/lib/haproxy/haproxy.pid\n '
1668 'maxconn 8000\n '
1669 'daemon\n '
1670 'stats socket /var/lib/haproxy/stats\n'
1671 '\ndefaults\n '
1672 'mode http\n '
1673 'log global\n '
1674 'option httplog\n '
1675 'option dontlognull\n '
1676 'option http-server-close\n '
1677 'option forwardfor except 127.0.0.0/8\n '
1678 'option redispatch\n '
1679 'retries 3\n '
1680 'timeout queue 20s\n '
1681 'timeout connect 5s\n '
1682 'timeout http-request 1s\n '
1683 'timeout http-keep-alive 5s\n '
1684 'timeout client 30s\n '
1685 'timeout server 30s\n '
1686 'timeout check 5s\n '
1687 'maxconn 8000\n'
1688 '\nfrontend stats\n '
1689 'mode http\n '
1690 'bind *:8999\n '
1691 'bind 1.2.3.7:8999\n '
1692 'stats enable\n '
1693 'stats uri /stats\n '
1694 'stats refresh 10s\n '
1695 'stats auth admin:12345\n '
1696 'http-request use-service prometheus-exporter if { path /metrics }\n '
1697 'monitor-uri /health\n'
1698 '\nfrontend frontend\n '
1699 'bind *:8089\n '
1700 'default_backend backend\n\n'
1701 'backend backend\n '
1702 'option forwardfor\n '
1703 'balance static-rr\n '
1704 'option httpchk HEAD / HTTP/1.0\n '
1705 'server '
1706 + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n'
1707 }
1708 }
1709
1710 assert haproxy_generated_conf[0] == haproxy_expected_conf
1711
1712 @patch("cephadm.serve.CephadmServe._run_cephadm")
1713 @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
1714 @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
1715 @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
1716 @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
1717 def test_keepalive_only_nfs_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1718 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1719
1720 with with_host(cephadm_module, 'test', addr='1.2.3.7'):
1721 cephadm_module.cache.update_host_networks('test', {
1722 '1.2.3.0/24': {
1723 'if0': ['1.2.3.4/32']
1724 }
1725 })
1726
1727 # Check the ingress with multiple VIPs
1728 s = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1),
1729 virtual_ip='1.2.3.0/24')
1730
1731 ispec = IngressSpec(service_type='ingress',
1732 service_id='test',
1733 backend_service='nfs.foo',
1734 monitor_port=8999,
1735 monitor_user='admin',
1736 monitor_password='12345',
1737 keepalived_password='12345',
1738 virtual_ip='1.2.3.0/24',
1739 keepalive_only=True)
1740 with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
1741 nfs_generated_conf, _ = cephadm_module.cephadm_services['nfs'].generate_config(
1742 CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=s.service_name()))
1743 ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
1744 assert "Bind_addr = 1.2.3.0/24" in ganesha_conf
1745
1746 keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
1747 CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
1748
1749 keepalived_expected_conf = {
1750 'files':
1751 {
1752 'keepalived.conf':
1753 '# This file is generated by cephadm.\n'
1754 'vrrp_script check_backend {\n '
1755 'script "/usr/bin/false"\n '
1756 'weight -20\n '
1757 'interval 2\n '
1758 'rise 2\n '
1759 'fall 2\n}\n\n'
1760 'vrrp_instance VI_0 {\n '
1761 'state MASTER\n '
1762 'priority 100\n '
1763 'interface if0\n '
1764 'virtual_router_id 50\n '
1765 'advert_int 1\n '
1766 'authentication {\n '
1767 'auth_type PASS\n '
1768 'auth_pass 12345\n '
1769 '}\n '
1770 'unicast_src_ip 1.2.3.7\n '
1771 'unicast_peer {\n '
1772 '}\n '
1773 'virtual_ipaddress {\n '
1774 '1.2.3.0/24 dev if0\n '
1775 '}\n '
1776 'track_script {\n '
1777 'check_backend\n }\n'
1778 '}\n'
1779 }
1780 }
1781
1782 # check keepalived config
1783 assert keepalived_generated_conf[0] == keepalived_expected_conf
1784
1785
1786 class TestCephFsMirror:
1787 @patch("cephadm.serve.CephadmServe._run_cephadm")
1788 def test_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1789 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1790 with with_host(cephadm_module, 'test'):
1791 with with_service(cephadm_module, ServiceSpec('cephfs-mirror')):
1792 cephadm_module.assert_issued_mon_command({
1793 'prefix': 'mgr module enable',
1794 'module': 'mirroring'
1795 })
1796
1797
1798 class TestJaeger:
1799 @patch("cephadm.serve.CephadmServe._run_cephadm")
1800 def test_jaeger_query(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1801 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1802
1803 spec = TracingSpec(es_nodes="192.168.0.1:9200",
1804 service_type="jaeger-query")
1805
1806 config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}
1807
1808 with with_host(cephadm_module, 'test'):
1809 with with_service(cephadm_module, spec):
1810 _run_cephadm.assert_called_with(
1811 'test',
1812 'jaeger-query.test',
1813 'deploy',
1814 [
1815 '--name', 'jaeger-query.test',
1816 '--meta-json',
1817 ('{"service_name": "jaeger-query", "ports": [16686], "ip": null, "deployed_by": [], "rank": null, '
1818 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1819 '--config-json', '-',
1820 '--tcp-ports', '16686'
1821
1822 ],
1823 stdin=json.dumps(config),
1824 image=''
1825 )
1826
1827 @patch("cephadm.serve.CephadmServe._run_cephadm")
1828 def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1829 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1830
1831 collector_spec = TracingSpec(service_type="jaeger-collector")
1832 es_spec = TracingSpec(service_type="elasticsearch")
1833 es_config = {}
1834
1835 with with_host(cephadm_module, 'test'):
1836 collector_config = {
1837 "elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'}
1838 with with_service(cephadm_module, es_spec):
1839 _run_cephadm.assert_called_with(
1840 'test',
1841 'elasticsearch.test',
1842 'deploy',
1843 [
1844 '--name', 'elasticsearch.test',
1845 '--meta-json',
1846 ('{"service_name": "elasticsearch", "ports": [9200], "ip": null, "deployed_by": [], "rank": null, '
1847 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1848 '--config-json', '-',
1849 '--tcp-ports', '9200'
1850
1851 ],
1852 stdin=json.dumps(es_config),
1853 image=''
1854 )
1855 with with_service(cephadm_module, collector_spec):
1856 _run_cephadm.assert_called_with(
1857 'test',
1858 'jaeger-collector.test',
1859 'deploy',
1860 [
1861 '--name', 'jaeger-collector.test',
1862 '--meta-json',
1863 ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, '
1864 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1865 '--config-json', '-',
1866 '--tcp-ports', '14250'
1867
1868 ],
1869 stdin=json.dumps(collector_config),
1870 image=''
1871 )
1872
1873 @patch("cephadm.serve.CephadmServe._run_cephadm")
1874 def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
1875 _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
1876
1877 collector_spec = TracingSpec(service_type="jaeger-collector", es_nodes="192.168.0.1:9200")
1878 collector_config = {"elasticsearch_nodes": "http://192.168.0.1:9200"}
1879
1880 agent_spec = TracingSpec(service_type="jaeger-agent")
1881 agent_config = {"collector_nodes": "test:14250"}
1882
1883 with with_host(cephadm_module, 'test'):
1884 with with_service(cephadm_module, collector_spec):
1885 _run_cephadm.assert_called_with(
1886 'test',
1887 'jaeger-collector.test',
1888 'deploy',
1889 [
1890 '--name', 'jaeger-collector.test',
1891 '--meta-json',
1892 ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, '
1893 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1894 '--config-json', '-',
1895 '--tcp-ports', '14250'
1896
1897 ],
1898 stdin=json.dumps(collector_config),
1899 image=''
1900 )
1901 with with_service(cephadm_module, agent_spec):
1902 _run_cephadm.assert_called_with(
1903 'test',
1904 'jaeger-agent.test',
1905 'deploy',
1906 [
1907 '--name', 'jaeger-agent.test',
1908 '--meta-json',
1909 ('{"service_name": "jaeger-agent", "ports": [6799], "ip": null, "deployed_by": [], "rank": null, '
1910 '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
1911 '--config-json', '-',
1912 '--tcp-ports', '6799'
1913
1914 ],
1915 stdin=json.dumps(agent_config),
1916 image=''
1917 )