class TestMonitoring:
+    def _get_config(self, url: str) -> str:
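+        """Expected alertmanager.yml content for a dashboard reachable at *url*."""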
+        return f"""
+            # This file is generated by cephadm.
+            # See https://prometheus.io/docs/alerting/configuration/ for documentation.
+
+            global:
+              resolve_timeout: 5m
+              http_config:
+                tls_config:
+                  insecure_skip_verify: true
+
+            route:
+              receiver: 'default'
+              routes:
+                - group_by: ['alertname']
+                  group_wait: 10s
+                  group_interval: 10s
+                  repeat_interval: 1h
+                  receiver: 'ceph-dashboard'
+
+            receivers:
+            - name: 'default'
+              webhook_configs:
+            - name: 'ceph-dashboard'
+              webhook_configs:
+              - url: '{url}/api/prometheus_receiver'
+            """
+
@patch("cephadm.serve.CephadmServe._run_cephadm")
- def test_alertmanager_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ @patch("mgr_module.MgrModule.get")
+ def test_alertmanager_config(self, mock_get, _run_cephadm,
+ cephadm_module: CephadmOrchestrator):
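+        # dashboard advertised on the IPv6 loopback; the generated webhook URL
+        # is expected to use localhost instead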
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+        mock_get.return_value = {"services": {"dashboard": "http://[::1]:8080"}}
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, AlertManagerSpec()):
+                y = dedent(self._get_config('http://localhost:8080')).lstrip()
+                _run_cephadm.assert_called_with(
+                    'test',
+                    'alertmanager.test',
+                    'deploy',
+                    [
+                        '--name', 'alertmanager.test',
+                        '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
+                        '--config-json', '-', '--tcp-ports', '9093 9094'
+                    ],
+                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
+                    image='')
-                y = dedent("""
-                # This file is generated by cephadm.
-                # See https://prometheus.io/docs/alerting/configuration/ for documentation.
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("mgr_module.MgrModule.get")
+ def test_alertmanager_config_v6(self, mock_get, _run_cephadm,
+ cephadm_module: CephadmOrchestrator):
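+        # a routable (non-loopback) IPv6 dashboard address should be carried
+        # into the webhook URL unchanged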
+        dashboard_url = "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080"
+        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+        mock_get.return_value = {"services": {"dashboard": dashboard_url}}
-                global:
-                  resolve_timeout: 5m
-                  http_config:
-                    tls_config:
-                      insecure_skip_verify: true
-
-                route:
-                  receiver: 'default'
-                  routes:
-                    - group_by: ['alertname']
-                      group_wait: 10s
-                      group_interval: 10s
-                      repeat_interval: 1h
-                      receiver: 'ceph-dashboard'
-
-                receivers:
-                - name: 'default'
-                  webhook_configs:
-                - name: 'ceph-dashboard'
-                  webhook_configs:
-                  - url: 'http://[::1]:8080/api/prometheus_receiver'
-                """).lstrip()
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, AlertManagerSpec()):
+                y = dedent(self._get_config(dashboard_url)).lstrip()
+                _run_cephadm.assert_called_with(
+                    'test',
+                    'alertmanager.test',
+                    'deploy',
+                    [
+                        '--name', 'alertmanager.test',
+                        '--meta-json',
+                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
+                        '--config-json', '-', '--tcp-ports', '9093 9094'
+                    ],
+                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
+                    image='')
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("mgr_module.MgrModule.get")
+ @patch("socket.getfqdn")
+ def test_alertmanager_config_v6_fqdn(self, mock_getfqdn, mock_get, _run_cephadm,
+ cephadm_module: CephadmOrchestrator):
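+        # with socket.getfqdn patched, the IPv6 address from the dashboard URL
+        # is expected to be replaced by the mgr FQDN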
+        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+        mock_getfqdn.return_value = "mgr.test.fqdn"
+        mock_get.return_value = {"services": {
+            "dashboard": "http://[2001:db8:4321:0000:0000:0000:0000:0000]:8080"}}
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, AlertManagerSpec()):
+                y = dedent(self._get_config("http://mgr.test.fqdn:8080")).lstrip()
+                _run_cephadm.assert_called_with(
+                    'test',
+                    'alertmanager.test',
+                    'deploy',
+                    [
+                        '--name', 'alertmanager.test',
+                        '--meta-json',
+                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
+                        '--config-json', '-', '--tcp-ports', '9093 9094'
+                    ],
+                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
+                    image='')
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("mgr_module.MgrModule.get")
+ def test_alertmanager_config_v4(self, mock_get, _run_cephadm, cephadm_module: CephadmOrchestrator):
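+        # a routable IPv4 dashboard address should likewise be carried into
+        # the webhook URL unchanged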
+        dashboard_url = "http://192.168.0.123:8080"
+        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+        mock_get.return_value = {"services": {"dashboard": dashboard_url}}
+
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, AlertManagerSpec()):
+                y = dedent(self._get_config(dashboard_url)).lstrip()
                _run_cephadm.assert_called_with(
                    'test',
                    'alertmanager.test',
                    'deploy',
                    [
                        '--name', 'alertmanager.test',
                        '--meta-json',
                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
                        '--config-json', '-', '--tcp-ports', '9093 9094'
                    ],
                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
-                    image='')\
+                    image='')
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("mgr_module.MgrModule.get")
+ @patch("socket.getfqdn")
+ def test_alertmanager_config_v4_fqdn(self, mock_getfqdn, mock_get, _run_cephadm,
+ cephadm_module: CephadmOrchestrator):
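+        # as above, but IPv4: with socket.getfqdn patched, the address is
+        # expected to be replaced by the mgr FQDN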
+        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+        mock_getfqdn.return_value = "mgr.test.fqdn"
+        mock_get.return_value = {"services": {"dashboard": "http://192.168.0.123:8080"}}
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, AlertManagerSpec()):
+                y = dedent(self._get_config("http://mgr.test.fqdn:8080")).lstrip()
+                _run_cephadm.assert_called_with(
+                    'test',
+                    'alertmanager.test',
+                    'deploy',
+                    [
+                        '--name', 'alertmanager.test',
+                        '--meta-json',
+                        '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
+                        '--config-json', '-', '--tcp-ports', '9093 9094'
+                    ],
+                    stdin=json.dumps({"files": {"alertmanager.yml": y}, "peers": []}),
+                    image='')
+
@patch("cephadm.serve.CephadmServe._run_cephadm")
def test_prometheus_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
'--config-json', '-',
'--tcp-ports', '9095'
],
- stdin=json.dumps({"files": {"prometheus.yml": y}}),
+ stdin=json.dumps({"files": {"prometheus.yml": y,
+ "/etc/prometheus/alerting/custom_alerts.yml": ""}}),
image='')
@patch("cephadm.serve.CephadmServe._run_cephadm")
filename: /tmp/positions.yaml
clients:
- - url: http://1::4:3100/loki/api/v1/push
+ - url: http://:3100/loki/api/v1/push
scrape_configs:
- job_name: system
static_configs:
- - targets:
- - 1::4
- labels:
+ - labels:
job: Cluster Logs
__path__: /var/log/ceph/**/*.log""").lstrip()
        _run_cephadm.side_effect = async_side_effect(("{}", "", 0))
        with with_host(cephadm_module, "test"):
-            cephadm_module.set_store("grafana_crt", "c")
-            cephadm_module.set_store("grafana_key", "k")
+            cephadm_module.set_store("test/grafana_crt", "c")
+            cephadm_module.set_store("test/grafana_key", "k")
            with with_service(
                cephadm_module, MonitoringSpec("prometheus")
            ) as _, with_service(cephadm_module, ServiceSpec("mgr")) as _, with_service(
              type: 'loki'
              access: 'proxy'
              orgId: 2
-              url: 'http://[1::4]:3100'
+              url: ''
              basicAuth: false
              isDefault: true
              editable: false""").lstrip(),
" type: 'loki'\n"
" access: 'proxy'\n"
' orgId: 2\n'
- " url: 'http://[1::4]:3100'\n"
+ " url: ''\n"
' basicAuth: false\n'
' isDefault: true\n'
' editable: false',
                'state MASTER\n '
                'priority 100\n '
                'interface if0\n '
-                'virtual_router_id 51\n '
+                'virtual_router_id 50\n '
                'advert_int 1\n '
                'authentication {\n '
                'auth_type PASS\n '
                '}\n '
                'track_script {\n '
                'check_backend\n }\n'
-                '}'
+                '}\n'
            }
        }
        assert haproxy_generated_conf[0] == haproxy_expected_conf
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_ingress_config_multi_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ with with_host(cephadm_module, 'test'):
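+            # register a host network on if0 so the VIP can be matched to an interface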
+            cephadm_module.cache.update_host_networks('test', {
+                '1.2.3.0/24': {
+                    'if0': ['1.2.3.4/32']
+                }
+            })
+
+            # Check the ingress with multiple VIPs
+            s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
+                        rgw_frontend_type='beast')
+
+            ispec = IngressSpec(service_type='ingress',
+                                service_id='test',
+                                backend_service='rgw.foo',
+                                frontend_port=8089,
+                                monitor_port=8999,
+                                monitor_user='admin',
+                                monitor_password='12345',
+                                keepalived_password='12345',
+                                virtual_interface_networks=['1.2.3.0/24'],
+                                virtual_ips_list=["1.2.3.4/32"])
+            with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
+                # generate the keepalived conf based on the specified spec
+                # Test with just one VIP in the list; with multiple VIPs but only
+                # one host, config generation would fail.
+                keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
+                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
+
+                keepalived_expected_conf = {
+                    'files':
+                        {
+                            'keepalived.conf':
+                                '# This file is generated by cephadm.\n'
+                                'vrrp_script check_backend {\n '
+                                'script "/usr/bin/curl http://localhost:8999/health"\n '
+                                'weight -20\n '
+                                'interval 2\n '
+                                'rise 2\n '
+                                'fall 2\n}\n\n'
+                                'vrrp_instance VI_0 {\n '
+                                'state MASTER\n '
+                                'priority 100\n '
+                                'interface if0\n '
+                                'virtual_router_id 50\n '
+                                'advert_int 1\n '
+                                'authentication {\n '
+                                'auth_type PASS\n '
+                                'auth_pass 12345\n '
+                                '}\n '
+                                'unicast_src_ip 1::4\n '
+                                'unicast_peer {\n '
+                                '}\n '
+                                'virtual_ipaddress {\n '
+                                '1.2.3.4/32 dev if0\n '
+                                '}\n '
+                                'track_script {\n '
+                                'check_backend\n }\n'
+                                '}\n'
+                        }
+                }
+
+                # check keepalived config
+                assert keepalived_generated_conf[0] == keepalived_expected_conf
+
+                # generate the haproxy conf based on the specified spec
+                haproxy_generated_conf = cephadm_module.cephadm_services['ingress'].haproxy_generate_config(
+                    CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
+
+                haproxy_expected_conf = {
+                    'files':
+                        {
+                            'haproxy.cfg':
+                                '# This file is generated by cephadm.'
+                                '\nglobal\n log '
+                                '127.0.0.1 local2\n '
+                                'chroot /var/lib/haproxy\n '
+                                'pidfile /var/lib/haproxy/haproxy.pid\n '
+                                'maxconn 8000\n '
+                                'daemon\n '
+                                'stats socket /var/lib/haproxy/stats\n'
+                                '\ndefaults\n '
+                                'mode http\n '
+                                'log global\n '
+                                'option httplog\n '
+                                'option dontlognull\n '
+                                'option http-server-close\n '
+                                'option forwardfor except 127.0.0.0/8\n '
+                                'option redispatch\n '
+                                'retries 3\n '
+                                'timeout queue 20s\n '
+                                'timeout connect 5s\n '
+                                'timeout http-request 1s\n '
+                                'timeout http-keep-alive 5s\n '
+                                'timeout client 1s\n '
+                                'timeout server 1s\n '
+                                'timeout check 5s\n '
+                                'maxconn 8000\n'
+                                '\nfrontend stats\n '
+                                'mode http\n '
+                                'bind *:8999\n '
+                                'bind localhost:8999\n '
+                                'stats enable\n '
+                                'stats uri /stats\n '
+                                'stats refresh 10s\n '
+                                'stats auth admin:12345\n '
+                                'http-request use-service prometheus-exporter if { path /metrics }\n '
+                                'monitor-uri /health\n'
+                                '\nfrontend frontend\n '
+                                'bind *:8089\n '
+                                'default_backend backend\n\n'
+                                'backend backend\n '
+                                'option forwardfor\n '
+                                'balance static-rr\n '
+                                'option httpchk HEAD / HTTP/1.0\n '
+                                'server '
+                                + haproxy_generated_conf[1][0] + ' 1::4:80 check weight 100\n'
+                        }
+                }
+
+                assert haproxy_generated_conf[0] == haproxy_expected_conf
+
class TestCephFsMirror:
@patch("cephadm.serve.CephadmServe._run_cephadm")