]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/cephadm/tests/test_services.py
98dcc850f20505fb5b88723bf798b25d8c1a8235
[ceph.git] / ceph / src / pybind / mgr / cephadm / tests / test_services.py
1 from textwrap import dedent
2 import json
3 import yaml
4
5 import pytest
6
7 from unittest.mock import MagicMock, call, patch, ANY
8
9 from cephadm.serve import CephadmServe
10 from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RgwService, \
11 RbdMirrorService, CrashService, CephadmDaemonDeploySpec
12 from cephadm.services.iscsi import IscsiService
13 from cephadm.services.nfs import NFSService
14 from cephadm.services.osd import OSDService
15 from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
16 NodeExporterService, LokiService, PromtailService
17 from cephadm.module import CephadmOrchestrator
18 from ceph.deployment.service_spec import IscsiServiceSpec, MonitoringSpec, AlertManagerSpec, \
19 ServiceSpec, RGWSpec, GrafanaSpec, SNMPGatewaySpec, IngressSpec, PlacementSpec
20 from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect
21
22 from orchestrator import OrchestratorError
23 from orchestrator._interface import DaemonDescription
24
25
class FakeInventory:
    """Minimal stand-in for the cephadm host inventory.

    Only ``get_addr`` is needed by the services under test; every hostname
    resolves to the same fixed address.
    """

    def get_addr(self, name: str) -> str:
        # The hostname is irrelevant for these tests.
        return '1.2.3.4'
30
class FakeMgr:
    """Lightweight double for the cephadm mgr module.

    Mon commands are routed through MagicMocks (so call args can be
    asserted) whose side effect emulates a tiny 'get-cmd'/'set-cmd'
    config store backed by ``self.config``.
    """

    def __init__(self):
        self.config = ''
        self.check_mon_command = MagicMock(side_effect=self._check_mon_command)
        self.mon_command = MagicMock(side_effect=self._check_mon_command)
        self.template = MagicMock()
        self.log = MagicMock()
        self.inventory = FakeInventory()

    def _check_mon_command(self, cmd_dict, inbuf=None):
        """Emulate mon command dispatch; returns (retcode, stdout, stderr)."""
        prefix = cmd_dict.get('prefix')
        if prefix == 'set-cmd':
            self.config = cmd_dict.get('value')
            return 0, 'value set', ''
        if prefix == 'get-cmd':
            return 0, self.config, ''
        # Anything else is an unknown command.
        return -1, '', 'error'

    def get_minimal_ceph_conf(self) -> str:
        """No ceph.conf content is needed by these tests."""
        return ''

    def get_mgr_ip(self) -> str:
        """Fixed mgr address matching FakeInventory."""
        return '1.2.3.4'
54
55
class TestCephadmService:
    """Unit tests for generic CephadmService behavior, driven by FakeMgr."""

    def test_set_service_url_on_dashboard(self):
        # pylint: disable=protected-access
        mgr = FakeMgr()
        service_url = 'http://svc:1000'
        service = GrafanaService(mgr)
        service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
        assert mgr.config == service_url

        # set-cmd should not be called if value doesn't change
        mgr.check_mon_command.reset_mock()
        service._set_service_url_on_dashboard('svc', 'get-cmd', 'set-cmd', service_url)
        mgr.check_mon_command.assert_called_once_with({'prefix': 'get-cmd'})

    def _get_services(self, mgr):
        """Instantiate one service object of every type, keyed by daemon type."""
        osd_service = OSDService(mgr)
        nfs_service = NFSService(mgr)
        mon_service = MonService(mgr)
        mgr_service = MgrService(mgr)
        mds_service = MdsService(mgr)
        rgw_service = RgwService(mgr)
        rbd_mirror_service = RbdMirrorService(mgr)
        grafana_service = GrafanaService(mgr)
        alertmanager_service = AlertmanagerService(mgr)
        prometheus_service = PrometheusService(mgr)
        node_exporter_service = NodeExporterService(mgr)
        loki_service = LokiService(mgr)
        promtail_service = PromtailService(mgr)
        crash_service = CrashService(mgr)
        iscsi_service = IscsiService(mgr)
        cephadm_services = {
            'mon': mon_service,
            'mgr': mgr_service,
            'osd': osd_service,
            'mds': mds_service,
            'rgw': rgw_service,
            'rbd-mirror': rbd_mirror_service,
            'nfs': nfs_service,
            'grafana': grafana_service,
            'alertmanager': alertmanager_service,
            'prometheus': prometheus_service,
            'node-exporter': node_exporter_service,
            'loki': loki_service,
            'promtail': promtail_service,
            'crash': crash_service,
            'iscsi': iscsi_service,
        }
        return cephadm_services

    def test_get_auth_entity(self):
        """get_auth_entity naming scheme per daemon type (client.*, mon., mgr.*, ...)."""
        mgr = FakeMgr()
        cephadm_services = self._get_services(mgr)

        for daemon_type in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            assert "client.%s.id1" % (daemon_type) == \
                cephadm_services[daemon_type].get_auth_entity("id1")

        assert "client.crash.host" == \
            cephadm_services["crash"].get_auth_entity("id1", "host")
        # BUGFIX: each raising call needs its own `with pytest.raises(...)` block.
        # Previously both calls shared one block, so the second call was never
        # executed (the first raise exits the context immediately).
        with pytest.raises(OrchestratorError):
            cephadm_services["crash"].get_auth_entity("id1", "")
        with pytest.raises(OrchestratorError):
            cephadm_services["crash"].get_auth_entity("id1")

        assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "host")
        assert "mon." == cephadm_services["mon"].get_auth_entity("id1", "")
        assert "mon." == cephadm_services["mon"].get_auth_entity("id1")

        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "host")
        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1", "")
        assert "mgr.id1" == cephadm_services["mgr"].get_auth_entity("id1")

        for daemon_type in ["osd", "mds"]:
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            assert "%s.id1" % daemon_type == \
                cephadm_services[daemon_type].get_auth_entity("id1")

        # services based on CephadmService shouldn't have get_auth_entity.
        # BUGFIX: the raises-context now wraps each individual call inside the
        # loop; before, the whole loop sat inside a single
        # `with pytest.raises(AttributeError)`, so only the first call of the
        # first daemon type was ever exercised.
        for daemon_type in ['grafana', 'alertmanager', 'prometheus', 'node-exporter', 'loki', 'promtail']:
            with pytest.raises(AttributeError):
                cephadm_services[daemon_type].get_auth_entity("id1", "host")
            with pytest.raises(AttributeError):
                cephadm_services[daemon_type].get_auth_entity("id1", "")
            with pytest.raises(AttributeError):
                cephadm_services[daemon_type].get_auth_entity("id1")
146
147
class TestISCSIService:
    """Tests for IscsiService auth caps and dashboard gateway registration.

    NOTE(review): the mgr/spec objects below are class attributes, so they are
    shared (and mutated, e.g. ``api_secure``) across all tests in this class —
    test order matters here.
    """

    mgr = FakeMgr()
    iscsi_service = IscsiService(mgr)

    # A fully populated iscsi spec; spec_store lookups return it for any name.
    iscsi_spec = IscsiServiceSpec(service_type='iscsi', service_id="a")
    iscsi_spec.daemon_type = "iscsi"
    iscsi_spec.daemon_id = "a"
    iscsi_spec.spec = MagicMock()
    iscsi_spec.spec.daemon_type = "iscsi"
    iscsi_spec.spec.ssl_cert = ''
    iscsi_spec.api_user = "user"
    iscsi_spec.api_password = "password"
    iscsi_spec.api_port = 5000
    iscsi_spec.api_secure = False
    iscsi_spec.ssl_cert = "cert"
    iscsi_spec.ssl_key = "key"

    mgr.spec_store = MagicMock()
    mgr.spec_store.all_specs.get.return_value = iscsi_spec

    def test_iscsi_client_caps(self):
        """prepare_create must request (and update) the iscsi client keyring caps."""

        iscsi_daemon_spec = CephadmDaemonDeploySpec(
            host='host', daemon_id='a', service_name=self.iscsi_spec.service_name())

        self.iscsi_service.prepare_create(iscsi_daemon_spec)

        # Caps list is flat [entity-type, caps-string, ...] as the mon expects.
        expected_caps = ['mon',
                         'profile rbd, allow command "osd blocklist", allow command "config-key get" with "key" prefix "iscsi/"',
                         'mgr', 'allow command "service status"',
                         'osd', 'allow rwx']

        expected_call = call({'prefix': 'auth get-or-create',
                              'entity': 'client.iscsi.a',
                              'caps': expected_caps})
        expected_call2 = call({'prefix': 'auth caps',
                               'entity': 'client.iscsi.a',
                               'caps': expected_caps})

        # Both the create and the caps-refresh commands must have been issued.
        assert expected_call in self.mgr.mon_command.mock_calls
        assert expected_call2 in self.mgr.mon_command.mock_calls

    @patch('cephadm.utils.resolve_ip')
    def test_iscsi_dashboard_config(self, mock_resolve_ip):
        """Gateway URL registered with the dashboard: IPv4, IPv6 (bracketed), https."""

        # Replace the FakeMgr mock so 'dashboard iscsi-gateway-list' returns
        # an empty gateway map.
        self.mgr.check_mon_command = MagicMock()
        self.mgr.check_mon_command.return_value = ('', '{"gateways": {}}', '')

        # Case 1: use IPV4 address
        id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
                                daemon_id="a", ip='192.168.1.1')
        daemon_list = [id1]
        mock_resolve_ip.return_value = '192.168.1.1'

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'http://user:password@192.168.1.1:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls

        # Case 2: use IPV6 address — must be wrapped in brackets in the URL.
        self.mgr.check_mon_command.reset_mock()

        id1 = DaemonDescription(daemon_type='iscsi', hostname="testhost1",
                                daemon_id="a", ip='FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')
        mock_resolve_ip.return_value = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'http://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls

        # Case 3: IPV6 Address . Secure protocol — api_secure flips scheme to https.
        self.mgr.check_mon_command.reset_mock()

        self.iscsi_spec.api_secure = True

        self.iscsi_service.config_dashboard(daemon_list)

        dashboard_expected_call = call({'prefix': 'dashboard iscsi-gateway-add',
                                        'name': 'testhost1'},
                                       'https://user:password@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:5000')

        assert dashboard_expected_call in self.mgr.check_mon_command.mock_calls
238
239
class TestMonitoring:
    """Verify the exact config payloads cephadm deploys for the monitoring
    stack (alertmanager, prometheus, loki, promtail, grafana).

    Each test drives the orchestrator through with_host/with_service and then
    asserts the full ``_run_cephadm`` deploy call, including the generated
    config file contents passed on stdin.
    """

    def _get_config(self, url: str) -> str:
        """Expected alertmanager.yml with the dashboard webhook receiver at *url*.

        Callers apply ``dedent(...).lstrip()`` to this template before comparing.
        """
        return f"""
        # This file is generated by cephadm.
        # See https://prometheus.io/docs/alerting/configuration/ for documentation.

        global:
          resolve_timeout: 5m
          http_config:
            tls_config:
              insecure_skip_verify: true

        route:
          receiver: 'default'
          routes:
            - group_by: ['alertname']
              group_wait: 10s
              group_interval: 10s
              repeat_interval: 1h
              receiver: 'ceph-dashboard'

        receivers:
        - name: 'default'
          webhook_configs:
        - name: 'ceph-dashboard'
          webhook_configs:
          - url: '{url}/api/prometheus_receiver'
        """

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    def test_alertmanager_config(self, mock_get, _run_cephadm,
                                 cephadm_module: CephadmOrchestrator):
        """Dashboard on a loopback address ([::1]) ends up as 'localhost' in the
        generated receiver URL — presumably rewritten by the service; confirm
        against AlertmanagerService.generate_config."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        mock_get.return_value = {"services": {"dashboard": "http://[::1]:8080"}}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config('http://localhost:8080')).lstrip()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '9093 9094'
                    ],
                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    def test_alertmanager_config_v6(self, mock_get, _run_cephadm,
                                    cephadm_module: CephadmOrchestrator):
        """A global (non-loopback) IPv6 dashboard URL is used verbatim."""
        dashboard_url = "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080"
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        mock_get.return_value = {"services": {"dashboard": dashboard_url}}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config(dashboard_url)).lstrip()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json',
                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '9093 9094'
                    ],
                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    @patch("socket.getfqdn")
    def test_alertmanager_config_v6_fqdn(self, mock_getfqdn, mock_get, _run_cephadm,
                                         cephadm_module: CephadmOrchestrator):
        """IPv6 dashboard host replaced by the mgr FQDN (mocked getfqdn)."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        mock_getfqdn.return_value = "mgr.test.fqdn"
        mock_get.return_value = {"services": {
            "dashboard": "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080"}}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config("http://mgr.test.fqdn:8080")).lstrip()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json',
                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '9093 9094'
                    ],
                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    def test_alertmanager_config_v4(self, mock_get, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """A routable IPv4 dashboard URL is used verbatim."""
        dashboard_url = "http://192.168.0.123:8080"
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        mock_get.return_value = {"services": {"dashboard": dashboard_url}}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config(dashboard_url)).lstrip()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '9093 9094'
                    ],
                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("mgr_module.MgrModule.get")
    @patch("socket.getfqdn")
    def test_alertmanager_config_v4_fqdn(self, mock_getfqdn, mock_get, _run_cephadm,
                                         cephadm_module: CephadmOrchestrator):
        """IPv4 dashboard host replaced by the mgr FQDN (mocked getfqdn)."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        mock_getfqdn.return_value = "mgr.test.fqdn"
        mock_get.return_value = {"services": {"dashboard": "http://192.168.0.123:8080"}}

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, AlertManagerSpec()):
                y = dedent(self._get_config("http://mgr.test.fqdn:8080")).lstrip()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json',
                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '9093 9094'
                    ],
                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_prometheus_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """prometheus.yml scrapes the mgr ('ceph' job) and node-exporter ('node' job)."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        # A node-exporter must exist so it shows up as a scrape target.
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
                    with_service(cephadm_module, MonitoringSpec('prometheus')) as _:

                y = dedent("""
                # This file is generated by cephadm.
                global:
                  scrape_interval: 10s
                  evaluation_interval: 10s
                rule_files:
                  - /etc/prometheus/alerting/*
                scrape_configs:
                  - job_name: 'ceph'
                    honor_labels: true
                    static_configs:
                    - targets:
                      - '[::1]:9283'

                  - job_name: 'node'
                    static_configs:
                    - targets: ['[1::4]:9100']
                      labels:
                        instance: 'test'

                """).lstrip()

                _run_cephadm.assert_called_with(
                    'test',
                    'prometheus.test',
                    'deploy',
                    [
                        '--name', 'prometheus.test',
                        '--meta-json',
                        '{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '9095'
                    ],
                    stdin=json.dumps({"files": {"prometheus.yml": y,
                                                "/etc/prometheus/alerting/custom_alerts.yml": ""}}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """loki.yml is a fixed single-node filesystem-backed configuration."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, MonitoringSpec('loki')) as _:

                y = dedent("""
                # This file is generated by cephadm.
                auth_enabled: false

                server:
                  http_listen_port: 3100
                  grpc_listen_port: 8080

                common:
                  path_prefix: /tmp/loki
                  storage:
                    filesystem:
                      chunks_directory: /tmp/loki/chunks
                      rules_directory: /tmp/loki/rules
                  replication_factor: 1
                  ring:
                    instance_addr: 127.0.0.1
                    kvstore:
                      store: inmemory

                schema_config:
                  configs:
                    - from: 2020-10-24
                      store: boltdb-shipper
                      object_store: filesystem
                      schema: v11
                      index:
                        prefix: index_
                        period: 24h""").lstrip()

                _run_cephadm.assert_called_with(
                    'test',
                    'loki.test',
                    'deploy',
                    [
                        '--name', 'loki.test',
                        '--meta-json',
                        '{"service_name": "loki", "ports": [3100], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '3100'
                    ],
                    stdin=json.dumps({"files": {"loki.yml": y}}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """promtail.yml ships ceph logs; no loki daemon here, so the client URL
        host is empty (http://:3100)."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, MonitoringSpec('promtail')) as _:

                y = dedent("""
                # This file is generated by cephadm.
                server:
                  http_listen_port: 9080
                  grpc_listen_port: 0

                positions:
                  filename: /tmp/positions.yaml

                clients:
                  - url: http://:3100/loki/api/v1/push

                scrape_configs:
                - job_name: system
                  static_configs:
                  - labels:
                      job: Cluster Logs
                      __path__: /var/log/ceph/**/*.log""").lstrip()

                _run_cephadm.assert_called_with(
                    'test',
                    'promtail.test',
                    'deploy',
                    [
                        '--name', 'promtail.test',
                        '--meta-json',
                        '{"service_name": "promtail", "ports": [9080], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '9080'
                    ],
                    stdin=json.dumps({"files": {"promtail.yml": y}}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
    @patch("cephadm.services.monitoring.verify_tls", lambda *_: None)
    def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """Full grafana deploy payload: ini, datasource provisioning, certs.

        The cert/key are pre-seeded into the config store so the generated
        'certs/*' files carry them (with the '# generated by cephadm' header).
        """
        _run_cephadm.side_effect = async_side_effect(("{}", "", 0))

        with with_host(cephadm_module, "test"):
            cephadm_module.set_store("test/grafana_crt", "c")
            cephadm_module.set_store("test/grafana_key", "k")
            with with_service(
                cephadm_module, MonitoringSpec("prometheus")
            ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service(
                cephadm_module, GrafanaSpec("grafana")
            ) as _:
                files = {
                    'grafana.ini': dedent("""
                        # This file is generated by cephadm.
                        [users]
                          default_theme = light
                        [auth.anonymous]
                          enabled = true
                          org_name = 'Main Org.'
                          org_role = 'Viewer'
                        [server]
                          domain = 'bootstrap.storage.lab'
                          protocol = https
                          cert_file = /etc/grafana/certs/cert_file
                          cert_key = /etc/grafana/certs/cert_key
                          http_port = 3000
                          http_addr = 
                        [snapshots]
                          external_enabled = false
                        [security]
                          disable_initial_admin_creation = true
                          cookie_secure = true
                          cookie_samesite = none
                          allow_embedding = true""").lstrip(),  # noqa: W291
                    'provisioning/datasources/ceph-dashboard.yml': dedent("""
                        # This file is generated by cephadm.
                        deleteDatasources:
                          - name: 'Dashboard1'
                            orgId: 1

                          - name: 'Loki'
                            orgId: 2

                        datasources:
                          - name: 'Dashboard1'
                            type: 'prometheus'
                            access: 'proxy'
                            orgId: 1
                            url: 'http://[1::4]:9095'
                            basicAuth: false
                            isDefault: true
                            editable: false

                          - name: 'Loki'
                            type: 'loki'
                            access: 'proxy'
                            orgId: 2
                            url: ''
                            basicAuth: false
                            isDefault: true
                            editable: false""").lstrip(),
                    'certs/cert_file': dedent("""
                        # generated by cephadm
                        c""").lstrip(),
                    'certs/cert_key': dedent("""
                        # generated by cephadm
                        k""").lstrip(),
                }

                _run_cephadm.assert_called_with(
                    'test',
                    'grafana.test',
                    'deploy',
                    [
                        '--name', 'grafana.test',
                        '--meta-json',
                        '{"service_name": "grafana", "ports": [3000], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '3000'],
                    stdin=json.dumps({"files": files}),
                    image='')

    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
        """initial_admin_password injects admin_user/admin_password into the ini
        (instead of disable_initial_admin_creation)."""
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, ServiceSpec('mgr')) as _, \
                    with_service(cephadm_module, GrafanaSpec(initial_admin_password='secure')):
                out = cephadm_module.cephadm_services['grafana'].generate_config(
                    CephadmDaemonDeploySpec('test', 'daemon', 'grafana'))
                assert out == (
                    {
                        'files':
                            {
                                'grafana.ini':
                                    '# This file is generated by cephadm.\n'
                                    '[users]\n'
                                    '  default_theme = light\n'
                                    '[auth.anonymous]\n'
                                    '  enabled = true\n'
                                    "  org_name = 'Main Org.'\n"
                                    "  org_role = 'Viewer'\n"
                                    '[server]\n'
                                    "  domain = 'bootstrap.storage.lab'\n"
                                    '  protocol = https\n'
                                    '  cert_file = /etc/grafana/certs/cert_file\n'
                                    '  cert_key = /etc/grafana/certs/cert_key\n'
                                    '  http_port = 3000\n'
                                    '  http_addr = \n'
                                    '[snapshots]\n'
                                    '  external_enabled = false\n'
                                    '[security]\n'
                                    '  admin_user = admin\n'
                                    '  admin_password = secure\n'
                                    '  cookie_secure = true\n'
                                    '  cookie_samesite = none\n'
                                    '  allow_embedding = true',
                                'provisioning/datasources/ceph-dashboard.yml':
                                    "# This file is generated by cephadm.\n"
                                    'deleteDatasources:\n\n'
                                    "  - name: 'Loki'\n"
                                    '    orgId: 2\n\n'
                                    'datasources:\n\n'
                                    "  - name: 'Loki'\n"
                                    "    type: 'loki'\n"
                                    "    access: 'proxy'\n"
                                    '    orgId: 2\n'
                                    "    url: ''\n"
                                    '    basicAuth: false\n'
                                    '    isDefault: true\n'
                                    '    editable: false',
                                # Cert contents are auto-generated, so only
                                # presence is asserted.
                                'certs/cert_file': ANY,
                                'certs/cert_key': ANY}}, [])

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """A custom spec port (4200) replaces the default alertmanager port and
        triggers a --reconfig redeploy via _check_daemons."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):

            yaml_str = """service_type: alertmanager
service_name: alertmanager
placement:
    count: 1
spec:
    port: 4200
"""
            yaml_file = yaml.safe_load(yaml_str)
            spec = ServiceSpec.from_json(yaml_file)

            with patch("cephadm.services.monitoring.AlertmanagerService.generate_config", return_value=({}, [])):
                with with_service(cephadm_module, spec):

                    CephadmServe(cephadm_module)._check_daemons()

                    _run_cephadm.assert_called_with(
                        'test', 'alertmanager.test', 'deploy', [
                            '--name', 'alertmanager.test',
                            '--meta-json', '{"service_name": "alertmanager", "ports": [4200, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                            '--config-json', '-',
                            '--tcp-ports', '4200 9094',
                            '--reconfig'
                        ],
                        stdin='{}',
                        image='')
691
692
class TestRGWService:
    """rgw_frontends config for beast/civetweb with and without SSL, when the
    spec pins the daemon to a specific (IPv6) network."""

    @pytest.mark.parametrize(
        "frontend, ssl, expected",
        [
            ('beast', False, 'beast endpoint=[fd00:fd00:fd00:3000::1]:80'),
            ('beast', True,
             'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo'),
            ('civetweb', False, 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
            ('civetweb', True,
             # civetweb marks SSL with a 's' suffix on the port, not a
             # separate ssl_endpoint option.
             'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'),
        ]
    )
    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    def test_rgw_update(self, frontend, ssl, expected, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'host1'):
            # The host must own an address in the spec's network for the
            # endpoint IP to be resolved.
            cephadm_module.cache.update_host_networks('host1', {
                'fd00:fd00:fd00:3000::/64': {
                    'if0': ['fd00:fd00:fd00:3000::1']
                }
            })
            s = RGWSpec(service_id="foo",
                        networks=['fd00:fd00:fd00:3000::/64'],
                        ssl=ssl,
                        rgw_frontend_type=frontend)
            with with_service(cephadm_module, s) as dds:
                _, f, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': f'client.{dds[0]}',
                    'key': 'rgw_frontends',
                })
            assert f == expected
725
726
class TestSNMPGateway:
    """snmp-gateway deploy payloads for V2c / V3 (with and without privacy),
    including default (9464) vs. explicit port handling."""

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """V2c: community string only; default port 9464."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V2c',
            snmp_destination='192.168.1.1:162',
            credentials={
                'snmp_community': 'public'
            })

        config = {
            "destination": spec.snmp_destination,
            "snmp_version": spec.snmp_version,
            "snmp_community": spec.credentials.get('snmp_community')
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    'snmp-gateway.test',
                    'deploy',
                    [
                        '--name', 'snmp-gateway.test',
                        '--meta-json',
                        '{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '9464'
                    ],
                    stdin=json.dumps(config),
                    image=''
                )

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """An explicit spec port (9465) overrides the default in ports/tcp-ports."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V2c',
            snmp_destination='192.168.1.1:162',
            credentials={
                'snmp_community': 'public'
            },
            port=9465)

        config = {
            "destination": spec.snmp_destination,
            "snmp_version": spec.snmp_version,
            "snmp_community": spec.credentials.get('snmp_community')
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    'snmp-gateway.test',
                    'deploy',
                    [
                        '--name', 'snmp-gateway.test',
                        '--meta-json',
                        '{"service_name": "snmp-gateway", "ports": [9465], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '9465'
                    ],
                    stdin=json.dumps(config),
                    image=''
                )

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """V3 auth-only: auth protocol defaults to SHA when unspecified."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V3',
            snmp_destination='192.168.1.1:162',
            engine_id='8000C53F00000000',
            credentials={
                'snmp_v3_auth_username': 'myuser',
                'snmp_v3_auth_password': 'mypassword'
            })

        config = {
            'destination': spec.snmp_destination,
            'snmp_version': spec.snmp_version,
            'snmp_v3_auth_protocol': 'SHA',
            'snmp_v3_auth_username': 'myuser',
            'snmp_v3_auth_password': 'mypassword',
            'snmp_v3_engine_id': '8000C53F00000000'
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    'snmp-gateway.test',
                    'deploy',
                    [
                        '--name', 'snmp-gateway.test',
                        '--meta-json',
                        '{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '9464'
                    ],
                    stdin=json.dumps(config),
                    image=''
                )

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """V3 with privacy: explicit MD5 auth and AES priv protocols plus priv
        password are passed through to the daemon config."""
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        spec = SNMPGatewaySpec(
            snmp_version='V3',
            snmp_destination='192.168.1.1:162',
            engine_id='8000C53F00000000',
            auth_protocol='MD5',
            privacy_protocol='AES',
            credentials={
                'snmp_v3_auth_username': 'myuser',
                'snmp_v3_auth_password': 'mypassword',
                'snmp_v3_priv_password': 'mysecret',
            })

        config = {
            'destination': spec.snmp_destination,
            'snmp_version': spec.snmp_version,
            'snmp_v3_auth_protocol': 'MD5',
            'snmp_v3_auth_username': spec.credentials.get('snmp_v3_auth_username'),
            'snmp_v3_auth_password': spec.credentials.get('snmp_v3_auth_password'),
            'snmp_v3_engine_id': '8000C53F00000000',
            'snmp_v3_priv_protocol': spec.privacy_protocol,
            'snmp_v3_priv_password': spec.credentials.get('snmp_v3_priv_password'),
        }

        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec):
                _run_cephadm.assert_called_with(
                    'test',
                    'snmp-gateway.test',
                    'deploy',
                    [
                        '--name', 'snmp-gateway.test',
                        '--meta-json',
                        '{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-',
                        '--tcp-ports', '9464'
                    ],
                    stdin=json.dumps(config),
                    image=''
                )
880
881
882 class TestIngressService:
883
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """End-to-end check of the generated keepalived.conf and haproxy.cfg for
        an ingress service fronting an RGW backend.

        NOTE(review): the expected strings below must match the rendered
        templates byte-for-byte, including internal run-length whitespace.
        """
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            # The virtual IP's network must exist on the host so keepalived can
            # pick the interface (if0).
            cephadm_module.cache.update_host_networks('test', {
                '1.2.3.0/24': {
                    'if0': ['1.2.3.4/32']
                }
            })

            # the ingress backend
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ispec = IngressSpec(service_type='ingress',
                                service_id='test',
                                backend_service='rgw.foo',
                                frontend_port=8089,
                                monitor_port=8999,
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    'files':
                        {
                            'keepalived.conf':
                                '# This file is generated by cephadm.\n'
                                'vrrp_script check_backend {\n    '
                                'script "/usr/bin/curl http://localhost:8999/health"\n    '
                                'weight -20\n    '
                                'interval 2\n    '
                                'rise 2\n    '
                                'fall 2\n}\n\n'
                                'vrrp_instance VI_0 {\n  '
                                'state MASTER\n  '
                                'priority 100\n  '
                                'interface if0\n  '
                                'virtual_router_id 50\n  '
                                'advert_int 1\n  '
                                'authentication {\n      '
                                'auth_type PASS\n      '
                                'auth_pass 12345\n  '
                                '}\n  '
                                'unicast_src_ip 1::4\n  '
                                'unicast_peer {\n  '
                                '}\n  '
                                'virtual_ipaddress {\n    '
                                '1.2.3.4/32 dev if0\n  '
                                '}\n  '
                                'track_script {\n      '
                                'check_backend\n  }\n'
                                '}\n'
                        }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                haproxy_expected_conf = {
                    'files':
                        {
                            'haproxy.cfg':
                                '# This file is generated by cephadm.'
                                '\nglobal\n    log         '
                                '127.0.0.1 local2\n    '
                                'chroot      /var/lib/haproxy\n    '
                                'pidfile     /var/lib/haproxy/haproxy.pid\n    '
                                'maxconn     8000\n    '
                                'daemon\n    '
                                'stats socket /var/lib/haproxy/stats\n'
                                '\ndefaults\n    '
                                'mode                    http\n    '
                                'log                     global\n    '
                                'option                  httplog\n    '
                                'option                  dontlognull\n    '
                                'option http-server-close\n    '
                                'option forwardfor       except 127.0.0.0/8\n    '
                                'option                  redispatch\n    '
                                'retries                 3\n    '
                                'timeout queue           20s\n    '
                                'timeout connect         5s\n    '
                                'timeout http-request    1s\n    '
                                'timeout http-keep-alive 5s\n    '
                                'timeout client          1s\n    '
                                'timeout server          1s\n    '
                                'timeout check           5s\n    '
                                'maxconn                 8000\n'
                                '\nfrontend stats\n    '
                                'mode http\n    '
                                'bind 1.2.3.4:8999\n    '
                                'bind localhost:8999\n    '
                                'stats enable\n    '
                                'stats uri /stats\n    '
                                'stats refresh 10s\n    '
                                'stats auth admin:12345\n    '
                                'http-request use-service prometheus-exporter if { path /metrics }\n    '
                                'monitor-uri /health\n'
                                '\nfrontend frontend\n    '
                                'bind 1.2.3.4:8089\n    '
                                'default_backend backend\n\n'
                                'backend backend\n    '
                                'option forwardfor\n    '
                                'balance static-rr\n    '
                                'option httpchk HEAD / HTTP/1.0\n    '
                                # The backend server name is generated, so it is
                                # spliced in from the generated conf itself.
                                'server '
                                + haproxy_generated_conf[1][0] + ' 1::4:80 check weight 100\n'
                        }
                }

                assert haproxy_generated_conf[0] == haproxy_expected_conf
1006
    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """Deploy an ingress service configured via ``virtual_ips_list`` (the
        multi-VIP spec field) and verify the generated keepalived and haproxy
        configs.

        Key difference from the single-VIP test: haproxy binds to ``*`` (all
        addresses) instead of a specific VIP, since the active VIP can be any
        entry of the list.
        """
        # Every cephadm call "succeeds" with empty JSON output.
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            # Register a host network so keepalived can resolve which
            # interface ('if0') carries the virtual IP's subnet.
            cephadm_module.cache.update_host_networks('test', {
                '1.2.3.0/24': {
                    'if0': ['1.2.3.4/32']
                }
            })

            # Check the ingress with multiple VIPs
            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            # Multi-VIP spec: virtual_ips_list instead of virtual_ip.
            ispec = IngressSpec(service_type='ingress',
                                service_id='test',
                                backend_service='rgw.foo',
                                frontend_port=8089,
                                monitor_port=8999,
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ips_list=["1.2.3.4/32"])
            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
                # generate the keepalived conf based on the specified spec
                # Test with only 1 IP on the list, as it will fail with more VIPS but only one host.
                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                # With a single entry in virtual_ips_list the keepalived
                # output is identical to the single-VIP case.
                keepalived_expected_conf = {
                    'files':
                        {
                            'keepalived.conf':
                                '# This file is generated by cephadm.\n'
                                'vrrp_script check_backend {\n '
                                'script "/usr/bin/curl http://localhost:8999/health"\n '
                                'weight -20\n '
                                'interval 2\n '
                                'rise 2\n '
                                'fall 2\n}\n\n'
                                'vrrp_instance VI_0 {\n '
                                'state MASTER\n '
                                'priority 100\n '
                                'interface if0\n '
                                'virtual_router_id 50\n '
                                'advert_int 1\n '
                                'authentication {\n '
                                'auth_type PASS\n '
                                'auth_pass 12345\n '
                                '}\n '
                                'unicast_src_ip 1::4\n '
                                'unicast_peer {\n '
                                '}\n '
                                'virtual_ipaddress {\n '
                                '1.2.3.4/32 dev if0\n '
                                '}\n '
                                'track_script {\n '
                                'check_backend\n }\n'
                                '}\n'
                        }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))

                # Expected haproxy config: note the wildcard binds
                # ('bind *:8999' / 'bind *:8089') — the multi-VIP case cannot
                # bind a single fixed VIP. The backend server name is spliced
                # in from the generated daemon list.
                haproxy_expected_conf = {
                    'files':
                        {
                            'haproxy.cfg':
                                '# This file is generated by cephadm.'
                                '\nglobal\n log '
                                '127.0.0.1 local2\n '
                                'chroot /var/lib/haproxy\n '
                                'pidfile /var/lib/haproxy/haproxy.pid\n '
                                'maxconn 8000\n '
                                'daemon\n '
                                'stats socket /var/lib/haproxy/stats\n'
                                '\ndefaults\n '
                                'mode http\n '
                                'log global\n '
                                'option httplog\n '
                                'option dontlognull\n '
                                'option http-server-close\n '
                                'option forwardfor except 127.0.0.0/8\n '
                                'option redispatch\n '
                                'retries 3\n '
                                'timeout queue 20s\n '
                                'timeout connect 5s\n '
                                'timeout http-request 1s\n '
                                'timeout http-keep-alive 5s\n '
                                'timeout client 1s\n '
                                'timeout server 1s\n '
                                'timeout check 5s\n '
                                'maxconn 8000\n'
                                '\nfrontend stats\n '
                                'mode http\n '
                                'bind *:8999\n '
                                'bind localhost:8999\n '
                                'stats enable\n '
                                'stats uri /stats\n '
                                'stats refresh 10s\n '
                                'stats auth admin:12345\n '
                                'http-request use-service prometheus-exporter if { path /metrics }\n '
                                'monitor-uri /health\n'
                                '\nfrontend frontend\n '
                                'bind *:8089\n '
                                'default_backend backend\n\n'
                                'backend backend\n '
                                'option forwardfor\n '
                                'balance static-rr\n '
                                'option httpchk HEAD / HTTP/1.0\n '
                                'server '
                                + haproxy_generated_conf[1][0] + ' 1::4:80 check weight 100\n'
                        }
                }

                assert haproxy_generated_conf[0] == haproxy_expected_conf
1130
1131
class TestCephFsMirror:
    """Tests for the cephfs-mirror cephadm service."""

    @patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        """Deploying a cephfs-mirror service must enable the mgr 'mirroring' module."""
        # Stub out the cephadm binary so no real daemon work happens.
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
        with with_host(cephadm_module, 'test'), \
                with_service(cephadm_module, ServiceSpec('cephfs-mirror')):
            # The service config step should have issued this mon command.
            cephadm_module.assert_issued_mon_command({
                'prefix': 'mgr module enable',
                'module': 'mirroring',
            })