RbdMirrorService, CrashService, CephadmDaemonDeploySpec
from cephadm.services.iscsi import IscsiService
from cephadm.services.nfs import NFSService
+from cephadm.services.nvmeof import NvmeofService
from cephadm.services.osd import OSDService
from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
NodeExporterService, LokiService, PromtailService
from cephadm.module import CephadmOrchestrator
from ceph.deployment.service_spec import IscsiServiceSpec, MonitoringSpec, AlertManagerSpec, \
ServiceSpec, RGWSpec, GrafanaSpec, SNMPGatewaySpec, IngressSpec, PlacementSpec, TracingSpec, \
- PrometheusSpec, CephExporterSpec, NFSServiceSpec
+ PrometheusSpec, CephExporterSpec, NFSServiceSpec, NvmeofServiceSpec
from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect
from ceph.utils import datetime_now
promtail_service = PromtailService(mgr)
crash_service = CrashService(mgr)
iscsi_service = IscsiService(mgr)
+ nvmeof_service = NvmeofService(mgr)
cephadm_services = {
'mon': mon_service,
'mgr': mgr_service,
'promtail': promtail_service,
'crash': crash_service,
'iscsi': iscsi_service,
+ 'nvmeof': nvmeof_service,
}
return cephadm_services
_run_cephadm.assert_called_with(
'test',
f'iscsi.{iscsi_daemon_id}',
- 'deploy',
- [
- '--name', f'iscsi.{iscsi_daemon_id}',
- '--meta-json', f'{"{"}"service_name": "iscsi.{pool}", "ports": [{api_port}], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null{"}"}',
- '--config-json', '-', '--tcp-ports', '3456'
- ],
- stdin=json.dumps({"config": "", "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n", "files": {"iscsi-gateway.cfg": iscsi_gateway_conf}}),
- image='')
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": f'iscsi.{iscsi_daemon_id}',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [api_port],
+ },
+ "meta": {
+ 'service_name': f'iscsi.{pool}',
+ 'ports': [api_port],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {
+ "config": "",
+ "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n",
+ "files": {
+ "iscsi-gateway.cfg": iscsi_gateway_conf,
+ },
+ }
+ }),
+ )
+
+
+class TestNVMEOFService:
+
+ mgr = FakeMgr()
+ nvmeof_service = NvmeofService(mgr)
+
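+ # a minimal nvmeof spec, registered in a mocked spec store so service
+ # code that looks up specs by name gets something sensible back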
+ nvmeof_spec = NvmeofServiceSpec(service_type='nvmeof', service_id="a")
+ nvmeof_spec.daemon_type = 'nvmeof'
+ nvmeof_spec.daemon_id = "a"
+ nvmeof_spec.spec = MagicMock()
+ nvmeof_spec.spec.daemon_type = 'nvmeof'
+
+ mgr.spec_store = MagicMock()
+ mgr.spec_store.all_specs.get.return_value = nvmeof_spec
+
+ def test_nvmeof_client_caps(self):
+ pass
+
+ @patch('cephadm.utils.resolve_ip')
+ def test_nvmeof_dashboard_config(self, mock_resolve_ip):
+ pass
+
+ @patch("cephadm.inventory.Inventory.get_addr", lambda _, __: '192.168.100.100')
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
+ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):
+
+ nvmeof_daemon_id = 'testpool.test.qwert'
+ pool = 'testpool'
+ tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288'
+ default_port = 5500
+ group = 'mygroup'
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+ _get_name.return_value = nvmeof_daemon_id
+
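+ # expected ceph-nvmeof.conf as rendered from the spec below; the deploy
+ # payload must carry it verbatim under config_blobs -> files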
+ nvmeof_gateway_conf = f"""# This file is generated by cephadm.
+[gateway]
+name = client.nvmeof.{nvmeof_daemon_id}
+group = {group}
+addr = 192.168.100.100
+port = {default_port}
+enable_auth = False
+state_update_notify = True
+state_update_interval_sec = 5
+
+[ceph]
+pool = {pool}
+config_file = /etc/ceph/ceph.conf
+id = nvmeof.{nvmeof_daemon_id}
+
+[mtls]
+server_key = ./server.key
+client_key = ./client.key
+server_cert = ./server.crt
+client_cert = ./client.crt
+
+[spdk]
+tgt_path = /usr/local/bin/nvmf_tgt
+rpc_socket = /var/tmp/spdk.sock
+timeout = 60
+log_level = WARN
+conn_retries = 10
+transports = tcp
+transport_tcp_options = {{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7}}
+tgt_cmd_extra_args = {tgt_cmd_extra_args}\n"""
+
+ with with_host(cephadm_module, 'test'):
+ with with_service(cephadm_module, NvmeofServiceSpec(service_id=pool,
+ tgt_cmd_extra_args=tgt_cmd_extra_args,
+ group=group,
+ pool=pool)):
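+ # deployment goes through the structured '_orch deploy' subcommand,
+ # with everything passed as JSON on stdin rather than as individual
+ # CLI flags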
+ _run_cephadm.assert_called_with(
+ 'test',
+ f'nvmeof.{nvmeof_daemon_id}',
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": "nvmeof.testpool.test.qwert",
+ "image": "",
+ "deploy_arguments": [],
+ "params": {
+ "tcp_ports": [5500, 4420, 8009]
+ },
+ "meta": {
+ "service_name": "nvmeof.testpool",
+ "ports": [5500, 4420, 8009],
+ "ip": None,
+ "deployed_by": [],
+ "rank": None,
+ "rank_generation": None,
+ "extra_container_args": None,
+ "extra_entrypoint_args": None
+ },
+ "config_blobs": {
+ "config": "",
+ "keyring": "[client.nvmeof.testpool.test.qwert]\nkey = None\n",
+ "files": {
+ "ceph-nvmeof.conf": nvmeof_gateway_conf
+ }
+ }
+ }),
+ )
class TestMonitoring:
def _get_config(self, url: str) -> str:
return f"""
# This file is generated by cephadm.
# See https://prometheus.io/docs/alerting/configuration/ for documentation.
with with_service(cephadm_module, AlertManagerSpec()):
y = dedent(self._get_config(expected_yaml_url)).lstrip()
_run_cephadm.assert_called_with(
- "test",
+ 'test',
"alertmanager.test",
- "deploy",
- [
- "--name",
- "alertmanager.test",
- "--meta-json",
- ('{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- "--config-json",
- "-",
- "--tcp-ports",
- "9093 9094",
- ],
- stdin=json.dumps(
- {"files": {"alertmanager.yml": y}, "peers": []}
- ),
- image="",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'alertmanager.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9093, 9094],
+ },
+ "meta": {
+ 'service_name': 'alertmanager',
+ 'ports': [9093, 9094],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {
+ "files": {
+ "alertmanager.yml": y,
+ },
+ "peers": [],
+ }
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
@patch("socket.getfqdn")
@patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
- @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password')
+ @patch("cephadm.services.monitoring.password_hash", lambda password: 'alertmanager_password_hash')
def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator):
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
with with_host(cephadm_module, 'test'):
cephadm_module.secure_monitoring_stack = True
- cephadm_module.alertmanager_web_password = 'fake_password'
- cephadm_module.alertmanager_web_user = 'admin'
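+ # the web user and password now come from the mgr config-key store
+ # rather than from attributes on the module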
+ cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user')
+ cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password')
cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert)
cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert)
with with_service(cephadm_module, AlertManagerSpec()):
cert_file: alertmanager.crt
key_file: alertmanager.key
basic_auth_users:
- admin: fake_password""").lstrip()
+ alertmanager_user: alertmanager_password_hash""").lstrip()
_run_cephadm.assert_called_with(
'test',
- 'alertmanager.test',
- 'deploy',
- [
- '--name', 'alertmanager.test',
- '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
- '--config-json', '-', '--tcp-ports', '9093 9094'
- ],
+ "alertmanager.test",
+ ['_orch', 'deploy'],
+ [],
stdin=json.dumps({
- "files": {
- "alertmanager.yml": y,
- 'alertmanager.crt': 'mycert',
- 'alertmanager.key': 'mykey',
- 'web.yml': web_config,
- 'root_cert.pem': 'my_root_cert'
+ "fsid": "fsid",
+ "name": 'alertmanager.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9093, 9094],
+ },
+ "meta": {
+ 'service_name': 'alertmanager',
+ 'ports': [9093, 9094],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
},
- 'peers': [],
- 'web_config': '/etc/alertmanager/web.yml'
+ "config_blobs": {
+ "files": {
+ "alertmanager.yml": y,
+ 'alertmanager.crt': 'mycert',
+ 'alertmanager.key': 'mykey',
+ 'web.yml': web_config,
+ 'root_cert.pem': 'my_root_cert'
+ },
+ 'peers': [],
+ 'web_config': '/etc/alertmanager/web.yml',
+ }
}),
- image='')
+ )
@patch("cephadm.serve.CephadmServe._run_cephadm")
@patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
with with_host(cephadm_module, 'test'):
+ # host "test" needs to have networks for keepalive to be placed
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.1']
+ },
+ })
with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \
with_service(cephadm_module, s) as _, \
_run_cephadm.assert_called_with(
'test',
- 'prometheus.test',
- 'deploy',
- [
- '--name', 'prometheus.test',
- '--meta-json',
- ('{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9095'
- ],
- stdin=json.dumps({"files": {"prometheus.yml": y,
- "/etc/prometheus/alerting/custom_alerts.yml": ""},
- 'retention_time': '15d',
- 'retention_size': '0'}),
- image='')
+ "prometheus.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'prometheus.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9095],
+ },
+ "meta": {
+ 'service_name': 'prometheus',
+ 'ports': [9095],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {
+ "files": {
+ "prometheus.yml": y,
+ "/etc/prometheus/alerting/custom_alerts.yml": "",
+ },
+ 'retention_time': '15d',
+ 'retention_size': '0',
+ },
+ }),
+ )
@patch("cephadm.serve.CephadmServe._run_cephadm")
@patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
- @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password')
+ @patch("cephadm.services.monitoring.password_hash", lambda password: 'prometheus_password_hash')
def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast')
with with_host(cephadm_module, 'test'):
cephadm_module.secure_monitoring_stack = True
- cephadm_module.http_server.service_discovery.username = 'admin'
- cephadm_module.http_server.service_discovery.password = 'fake_password'
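+ # prometheus/alertmanager credentials come from the config-key store;
+ # the service-discovery endpoint keeps its own separate user/password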
+ cephadm_module.set_store(PrometheusService.USER_CFG_KEY, 'prometheus_user')
+ cephadm_module.set_store(PrometheusService.PASS_CFG_KEY, 'prometheus_plain_password')
+ cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user')
+ cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password')
+ cephadm_module.http_server.service_discovery.username = 'sd_user'
+ cephadm_module.http_server.service_discovery.password = 'sd_password'
cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(
side_effect=gen_cert)
+ # host "test" needs to have networks for keepalive to be placed
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.1']
+ },
+ })
with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \
with_service(cephadm_module, s) as _, \
with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \
cert_file: prometheus.crt
key_file: prometheus.key
basic_auth_users:
- admin: fake_password""").lstrip()
+ prometheus_user: prometheus_password_hash""").lstrip()
y = dedent("""
# This file is generated by cephadm.
alertmanagers:
- scheme: https
basic_auth:
- username: admin
- password: admin
+ username: alertmanager_user
+ password: alertmanager_plain_password
tls_config:
ca_file: root_cert.pem
http_sd_configs:
- url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager
basic_auth:
- username: admin
- password: fake_password
+ username: sd_user
+ password: sd_password
tls_config:
ca_file: root_cert.pem
http_sd_configs:
- url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
basic_auth:
- username: admin
- password: fake_password
+ username: sd_user
+ password: sd_password
tls_config:
ca_file: root_cert.pem
http_sd_configs:
- url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter
basic_auth:
- username: admin
- password: fake_password
+ username: sd_user
+ password: sd_password
tls_config:
ca_file: root_cert.pem
http_sd_configs:
- url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy
basic_auth:
- username: admin
- password: fake_password
+ username: sd_user
+ password: sd_password
tls_config:
ca_file: root_cert.pem
http_sd_configs:
- url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
basic_auth:
- username: admin
- password: fake_password
+ username: sd_user
+ password: sd_password
tls_config:
ca_file: root_cert.pem
""").lstrip()
_run_cephadm.assert_called_with(
'test',
- 'prometheus.test',
- 'deploy',
- [
- '--name', 'prometheus.test',
- '--meta-json',
- '{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}',
- '--config-json', '-',
- '--tcp-ports', '9095'
- ],
+ "prometheus.test",
+ ['_orch', 'deploy'],
+ [],
stdin=json.dumps({
- 'files': {
- 'prometheus.yml': y,
- 'root_cert.pem': '',
- 'mgr_prometheus_cert.pem': '',
- 'web.yml': web_config,
- 'prometheus.crt': 'mycert',
- 'prometheus.key': 'mykey',
- "/etc/prometheus/alerting/custom_alerts.yml": "",
+ "fsid": "fsid",
+ "name": 'prometheus.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9095],
+ },
+ "meta": {
+ 'service_name': 'prometheus',
+ 'ports': [9095],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
},
- 'retention_time': '15d',
- 'retention_size': '0',
- 'web_config': '/etc/prometheus/web.yml'}),
- image=''
+ "config_blobs": {
+ 'files': {
+ 'prometheus.yml': y,
+ 'root_cert.pem': '',
+ 'mgr_prometheus_cert.pem': '',
+ 'web.yml': web_config,
+ 'prometheus.crt': 'mycert',
+ 'prometheus.key': 'mykey',
+ "/etc/prometheus/alerting/custom_alerts.yml": "",
+ },
+ 'retention_time': '15d',
+ 'retention_size': '0',
+ 'web_config': '/etc/prometheus/web.yml',
+ },
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
_run_cephadm.assert_called_with(
'test',
- 'loki.test',
- 'deploy',
- [
- '--name', 'loki.test',
- '--meta-json',
- ('{"service_name": "loki", "ports": [3100], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '3100'
- ],
- stdin=json.dumps({"files": {"loki.yml": y}}),
- image='')
+ "loki.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'loki.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [3100],
+ },
+ "meta": {
+ 'service_name': 'loki',
+ 'ports': [3100],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {
+ "files": {
+ "loki.yml": y
+ },
+ },
+ }),
+ )
@patch("cephadm.serve.CephadmServe._run_cephadm")
def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
_run_cephadm.assert_called_with(
'test',
- 'promtail.test',
- 'deploy',
- [
- '--name', 'promtail.test',
- '--meta-json',
- ('{"service_name": "promtail", "ports": [9080], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9080'
- ],
- stdin=json.dumps({"files": {"promtail.yml": y}}),
- image='')
+ "promtail.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'promtail.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9080],
+ },
+ "meta": {
+ 'service_name': 'promtail',
+ 'ports': [9080],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {
+ "files": {
+ "promtail.yml": y
+ },
+ },
+ }),
+ )
@patch("cephadm.serve.CephadmServe._run_cephadm")
@patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
_run_cephadm.assert_called_with(
'test',
- 'grafana.test',
- 'deploy',
- [
- '--name', 'grafana.test',
- '--meta-json',
- ('{"service_name": "grafana", "ports": [3000], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-', '--tcp-ports', '3000'],
- stdin=json.dumps({"files": files}),
- image='')
+ "grafana.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'grafana.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [3000],
+ },
+ "meta": {
+ 'service_name': 'grafana',
+ 'ports': [3000],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {
+ "files": files,
+ },
+ }),
+ )
@patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator):
CephadmServe(cephadm_module)._check_daemons()
_run_cephadm.assert_called_with(
- 'test', 'alertmanager.test', 'deploy', [
- '--name', 'alertmanager.test',
- '--meta-json', ('{"service_name": "alertmanager", "ports": [4200, 9094], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '4200 9094',
- '--reconfig'
- ],
- stdin='{}',
- image='')
+ 'test',
+ "alertmanager.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'alertmanager.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [4200, 9094],
+ 'reconfig': True,
+ },
+ "meta": {
+ 'service_name': 'alertmanager',
+ 'ports': [4200, 9094],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": {},
+ }),
+ )
class TestRGWService:
with with_service(cephadm_module, spec):
_run_cephadm.assert_called_with(
'test',
- 'snmp-gateway.test',
- 'deploy',
- [
- '--name', 'snmp-gateway.test',
- '--meta-json',
- ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9464'
- ],
- stdin=json.dumps(config),
- image=''
+ "snmp-gateway.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'snmp-gateway.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9464],
+ },
+ "meta": {
+ 'service_name': 'snmp-gateway',
+ 'ports': [9464],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": config,
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
with with_service(cephadm_module, spec):
_run_cephadm.assert_called_with(
'test',
- 'snmp-gateway.test',
- 'deploy',
- [
- '--name', 'snmp-gateway.test',
- '--meta-json',
- ('{"service_name": "snmp-gateway", "ports": [9465], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9465'
- ],
- stdin=json.dumps(config),
- image=''
+ "snmp-gateway.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'snmp-gateway.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9465],
+ },
+ "meta": {
+ 'service_name': 'snmp-gateway',
+ 'ports': [9465],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": config,
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
with with_service(cephadm_module, spec):
_run_cephadm.assert_called_with(
'test',
- 'snmp-gateway.test',
- 'deploy',
- [
- '--name', 'snmp-gateway.test',
- '--meta-json',
- ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9464'
- ],
- stdin=json.dumps(config),
- image=''
+ "snmp-gateway.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'snmp-gateway.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9464],
+ },
+ "meta": {
+ 'service_name': 'snmp-gateway',
+ 'ports': [9464],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": config,
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
with with_service(cephadm_module, spec):
_run_cephadm.assert_called_with(
'test',
- 'snmp-gateway.test',
- 'deploy',
- [
- '--name', 'snmp-gateway.test',
- '--meta-json',
- ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9464'
- ],
- stdin=json.dumps(config),
- image=''
+ "snmp-gateway.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'snmp-gateway.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9464],
+ },
+ "meta": {
+ 'service_name': 'snmp-gateway',
+ 'ports': [9464],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": config,
+ }),
)
class TestIngressService:
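+ # run once with the PROXY protocol disabled and once enabled; the
+ # expected haproxy.cfg below differs only by the send-proxy-v2 line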
+ @pytest.mark.parametrize(
+ "enable_haproxy_protocol",
+ [False, True],
+ )
@patch("cephadm.inventory.Inventory.get_addr")
@patch("cephadm.utils.resolve_ip")
@patch("cephadm.inventory.HostCache.get_daemons_by_service")
@patch("cephadm.serve.CephadmServe._run_cephadm")
- def test_ingress_config_nfs_multiple_nfs_same_rank(self, _run_cephadm, _get_daemons_by_service, _resolve_ip, _get_addr, cephadm_module: CephadmOrchestrator):
+ def test_ingress_config_nfs_multiple_nfs_same_rank(
+ self,
+ _run_cephadm,
+ _get_daemons_by_service,
+ _resolve_ip,
+ _get_addr,
+ cephadm_module: CephadmOrchestrator,
+ enable_haproxy_protocol: bool,
+ ):
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
def fake_resolve_ip(hostname: str) -> str:
return hostname
_get_addr.side_effect = fake_get_addr
- nfs_service = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1, hosts=['host1', 'host2']),
- port=12049)
-
- ispec = IngressSpec(service_type='ingress',
- service_id='nfs.foo',
- backend_service='nfs.foo',
- frontend_port=2049,
- monitor_port=9049,
- virtual_ip='192.168.122.100/24',
- monitor_user='admin',
- monitor_password='12345',
- keepalived_password='12345')
+ nfs_service = NFSServiceSpec(
+ service_id="foo",
+ placement=PlacementSpec(
+ count=1,
+ hosts=['host1', 'host2']),
+ port=12049,
+ enable_haproxy_protocol=enable_haproxy_protocol,
+ )
+
+ ispec = IngressSpec(
+ service_type='ingress',
+ service_id='nfs.foo',
+ backend_service='nfs.foo',
+ frontend_port=2049,
+ monitor_port=9049,
+ virtual_ip='192.168.122.100/24',
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ enable_haproxy_protocol=enable_haproxy_protocol,
+ )
cephadm_module.spec_store._specs = {
'nfs.foo': nfs_service,
# for the host1 nfs daemon as we'll end up giving that
# one higher rank_generation but the same rank as the one
# on host2
+ haproxy_txt = (
+ '# This file is generated by cephadm.\n'
+ 'global\n'
+ ' log 127.0.0.1 local2\n'
+ ' chroot /var/lib/haproxy\n'
+ ' pidfile /var/lib/haproxy/haproxy.pid\n'
+ ' maxconn 8000\n'
+ ' daemon\n'
+ ' stats socket /var/lib/haproxy/stats\n\n'
+ 'defaults\n'
+ ' mode tcp\n'
+ ' log global\n'
+ ' timeout queue 1m\n'
+ ' timeout connect 10s\n'
+ ' timeout client 1m\n'
+ ' timeout server 1m\n'
+ ' timeout check 10s\n'
+ ' maxconn 8000\n\n'
+ 'frontend stats\n'
+ ' mode http\n'
+ ' bind 192.168.122.100:9049\n'
+ ' bind host1:9049\n'
+ ' stats enable\n'
+ ' stats uri /stats\n'
+ ' stats refresh 10s\n'
+ ' stats auth admin:12345\n'
+ ' http-request use-service prometheus-exporter if { path /metrics }\n'
+ ' monitor-uri /health\n\n'
+ 'frontend frontend\n'
+ ' bind 192.168.122.100:2049\n'
+ ' default_backend backend\n\n'
+ 'backend backend\n'
+ ' mode tcp\n'
+ ' balance source\n'
+ ' hash-type consistent\n'
+ )
+ if enable_haproxy_protocol:
+ haproxy_txt += ' default-server send-proxy-v2\n'
+ haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049\n'
haproxy_expected_conf = {
- 'files':
- {
- 'haproxy.cfg':
- '# This file is generated by cephadm.\n'
- 'global\n'
- ' log 127.0.0.1 local2\n'
- ' chroot /var/lib/haproxy\n'
- ' pidfile /var/lib/haproxy/haproxy.pid\n'
- ' maxconn 8000\n'
- ' daemon\n'
- ' stats socket /var/lib/haproxy/stats\n\n'
- 'defaults\n'
- ' mode tcp\n'
- ' log global\n'
- ' timeout queue 1m\n'
- ' timeout connect 10s\n'
- ' timeout client 1m\n'
- ' timeout server 1m\n'
- ' timeout check 10s\n'
- ' maxconn 8000\n\n'
- 'frontend stats\n'
- ' mode http\n'
- ' bind 192.168.122.100:9049\n'
- ' bind host1:9049\n'
- ' stats enable\n'
- ' stats uri /stats\n'
- ' stats refresh 10s\n'
- ' stats auth admin:12345\n'
- ' http-request use-service prometheus-exporter if { path /metrics }\n'
- ' monitor-uri /health\n\n'
- 'frontend frontend\n'
- ' bind 192.168.122.100:2049\n'
- ' default_backend backend\n\n'
- 'backend backend\n'
- ' mode tcp\n'
- ' balance source\n'
- ' hash-type consistent\n'
- ' server nfs.foo.0 192.168.122.111:12049\n'
- }
+ 'files': {'haproxy.cfg': haproxy_txt}
}
# verify we get the same cfg regardless of the order in which the nfs daemons are returned
with with_host(cephadm_module, 'test', addr='1.2.3.7'):
cephadm_module.cache.update_host_networks('test', {
'1.2.3.0/24': {
- 'if0': ['1.2.3.4/32']
+ 'if0': ['1.2.3.4']
}
})
'auth_type PASS\n '
'auth_pass 12345\n '
'}\n '
- 'unicast_src_ip 1.2.3.7\n '
+ 'unicast_src_ip 1.2.3.4\n '
'unicast_peer {\n '
'}\n '
'virtual_ipaddress {\n '
with with_host(cephadm_module, 'test'):
cephadm_module.cache.update_host_networks('test', {
'1.2.3.0/24': {
- 'if0': ['1.2.3.4/32']
+ 'if0': ['1.2.3.1']
}
})
'auth_type PASS\n '
'auth_pass 12345\n '
'}\n '
- 'unicast_src_ip 1::4\n '
+ 'unicast_src_ip 1.2.3.1\n '
'unicast_peer {\n '
'}\n '
'virtual_ipaddress {\n '
with with_host(cephadm_module, 'test', addr='1.2.3.7'):
cephadm_module.cache.update_host_networks('test', {
'1.2.3.0/24': {
- 'if0': ['1.2.3.4/32']
+ 'if0': ['1.2.3.1']
}
})
'auth_type PASS\n '
'auth_pass 12345\n '
'}\n '
- 'unicast_src_ip 1.2.3.7\n '
+ 'unicast_src_ip 1.2.3.1\n '
'unicast_peer {\n '
'}\n '
'virtual_ipaddress {\n '
assert haproxy_generated_conf[0] == haproxy_expected_conf
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_haproxy_port_ips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ with with_host(cephadm_module, 'test', addr='1.2.3.7'):
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.4/32']
+ }
+ })
+
+ # Check the ingress with multiple VIPs
+ s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
+ rgw_frontend_type='beast')
+
+ ip = '1.2.3.100'
+ frontend_port = 8089
+
+ ispec = IngressSpec(service_type='ingress',
+ service_id='test',
+ backend_service='rgw.foo',
+ frontend_port=frontend_port,
+ monitor_port=8999,
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ virtual_ip=f"{ip}/24")
+ with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
+ # generate the haproxy conf based on the specified spec
+ haproxy_daemon_spec = cephadm_module.cephadm_services['ingress'].prepare_create(
+ CephadmDaemonDeploySpec(
+ host='test',
+ daemon_type='haproxy',
+ daemon_id='ingress',
+ service_name=ispec.service_name()))
+
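+ # the frontend port should be bound to the VIP, not to all interfaces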
+ assert haproxy_daemon_spec.port_ips == {str(frontend_port): ip}
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_keepalive_config_multi_interface_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ with with_host(cephadm_module, 'test', addr='1.2.3.1'):
+ with with_host(cephadm_module, 'test2', addr='1.2.3.2'):
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.1']
+ },
+ '100.100.100.0/24': {
+ 'if1': ['100.100.100.1']
+ }
+ })
+ cephadm_module.cache.update_host_networks('test2', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.2']
+ },
+ '100.100.100.0/24': {
+ 'if1': ['100.100.100.2']
+ }
+ })
+
+ # Check the ingress with multiple VIPs
+ s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
+ rgw_frontend_type='beast')
+
+ ispec = IngressSpec(service_type='ingress',
+ service_id='test',
+ placement=PlacementSpec(hosts=['test', 'test2']),
+ backend_service='rgw.foo',
+ frontend_port=8089,
+ monitor_port=8999,
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"])
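+ # each VIP should get its own vrrp_instance bound to the interface
+ # carrying its subnet (if0 for 1.2.3.0/24, if1 for 100.100.100.0/24)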
+ with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
+ keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config(
+ CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name()))
+
+ keepalived_expected_conf = {
+ 'files':
+ {
+ 'keepalived.conf':
+ '# This file is generated by cephadm.\n'
+ 'vrrp_script check_backend {\n '
+ 'script "/usr/bin/curl http://1.2.3.1:8999/health"\n '
+ 'weight -20\n '
+ 'interval 2\n '
+ 'rise 2\n '
+ 'fall 2\n}\n\n'
+ 'vrrp_instance VI_0 {\n '
+ 'state MASTER\n '
+ 'priority 100\n '
+ 'interface if0\n '
+ 'virtual_router_id 50\n '
+ 'advert_int 1\n '
+ 'authentication {\n '
+ 'auth_type PASS\n '
+ 'auth_pass 12345\n '
+ '}\n '
+ 'unicast_src_ip 1.2.3.1\n '
+ 'unicast_peer {\n '
+ '1.2.3.2\n '
+ '}\n '
+ 'virtual_ipaddress {\n '
+ '1.2.3.100/24 dev if0\n '
+ '}\n '
+ 'track_script {\n '
+ 'check_backend\n }\n'
+ '}\n'
+ 'vrrp_instance VI_1 {\n '
+ 'state BACKUP\n '
+ 'priority 90\n '
+ 'interface if1\n '
+ 'virtual_router_id 51\n '
+ 'advert_int 1\n '
+ 'authentication {\n '
+ 'auth_type PASS\n '
+ 'auth_pass 12345\n '
+ '}\n '
+ 'unicast_src_ip 100.100.100.1\n '
+ 'unicast_peer {\n '
+ '100.100.100.2\n '
+ '}\n '
+ 'virtual_ipaddress {\n '
+ '100.100.100.100/24 dev if1\n '
+ '}\n '
+ 'track_script {\n '
+ 'check_backend\n }\n'
+ '}\n'
+ }
+ }
+
+ # check keepalived config
+ assert keepalived_generated_conf[0] == keepalived_expected_conf
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_keepalive_interface_host_filtering(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ # we need to make sure keepalived daemons will have an interface
+ # on the hosts we deploy them on in order to set up their VIP.
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ with with_host(cephadm_module, 'test', addr='1.2.3.1'):
+ with with_host(cephadm_module, 'test2', addr='1.2.3.2'):
+ with with_host(cephadm_module, 'test3', addr='1.2.3.3'):
+ with with_host(cephadm_module, 'test4', addr='1.2.3.4'):
+ # set up "test" and "test4" to have all the necessary interfaces,
+ # "test2" to have only one of them (it should still be filtered out)
+ # and "test3" to have none of them
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.1']
+ },
+ '100.100.100.0/24': {
+ 'if1': ['100.100.100.1']
+ }
+ })
+ cephadm_module.cache.update_host_networks('test2', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.2']
+ },
+ })
+ cephadm_module.cache.update_host_networks('test4', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.4']
+ },
+ '100.100.100.0/24': {
+ 'if1': ['100.100.100.4']
+ }
+ })
+
+ s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1),
+ rgw_frontend_type='beast')
+
+ ispec = IngressSpec(service_type='ingress',
+ service_id='test',
+ placement=PlacementSpec(hosts=['test', 'test2', 'test3', 'test4']),
+ backend_service='rgw.foo',
+ frontend_port=8089,
+ monitor_port=8999,
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"])
+ with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _:
+ # since we're never actually going to refresh the host here,
+ # check the tmp daemons to see what was placed during the apply
+ daemons = cephadm_module.cache._get_tmp_daemons()
+ keepalive_daemons = [d for d in daemons if d.daemon_type == 'keepalived']
+ hosts_deployed_on = [d.hostname for d in keepalive_daemons]
+ assert 'test' in hosts_deployed_on
+ assert 'test2' not in hosts_deployed_on
+ assert 'test3' not in hosts_deployed_on
+ assert 'test4' in hosts_deployed_on
+
@patch("cephadm.serve.CephadmServe._run_cephadm")
@patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
@patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
with with_host(cephadm_module, 'test', addr='1.2.3.7'):
cephadm_module.cache.update_host_networks('test', {
'1.2.3.0/24': {
- 'if0': ['1.2.3.4/32']
+ 'if0': ['1.2.3.1']
}
})
'auth_type PASS\n '
'auth_pass 12345\n '
'}\n '
- 'unicast_src_ip 1.2.3.7\n '
+ 'unicast_src_ip 1.2.3.1\n '
'unicast_peer {\n '
'}\n '
'virtual_ipaddress {\n '
# check keepalived config
assert keepalived_generated_conf[0] == keepalived_expected_conf
+ @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
+ @patch("cephadm.inventory.Inventory.keys")
+ @patch("cephadm.inventory.Inventory.get_addr")
+ @patch("cephadm.utils.resolve_ip")
+ @patch("cephadm.inventory.HostCache.get_daemons_by_service")
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_ingress_config_nfs_proxy_protocol(
+ self,
+ _run_cephadm,
+ _get_daemons_by_service,
+ _resolve_ip,
+ _get_addr,
+ _inventory_keys,
+ cephadm_module: CephadmOrchestrator,
+ ):
+ """Verify that setting enable_haproxy_protocol for both ingress and
+ nfs services sets the desired configuration parameters in both
+ the haproxy config and nfs ganesha config.
+ """
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ def fake_resolve_ip(hostname: str) -> str:
+ if hostname in ('host1', "192.168.122.111"):
+ return '192.168.122.111'
+ elif hostname in ('host2', '192.168.122.222'):
+ return '192.168.122.222'
+ else:
+ raise KeyError(hostname)
+ _resolve_ip.side_effect = fake_resolve_ip
+ _get_addr.side_effect = fake_resolve_ip
+
+ def fake_keys():
+ return ['host1', 'host2']
+ _inventory_keys.side_effect = fake_keys
+
+ nfs_service = NFSServiceSpec(
+ service_id="foo",
+ placement=PlacementSpec(
+ count=1,
+ hosts=['host1', 'host2']),
+ port=12049,
+ enable_haproxy_protocol=True,
+ )
+
+ ispec = IngressSpec(
+ service_type='ingress',
+ service_id='nfs.foo',
+ backend_service='nfs.foo',
+ frontend_port=2049,
+ monitor_port=9049,
+ virtual_ip='192.168.122.100/24',
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ enable_haproxy_protocol=True,
+ )
+
+ cephadm_module.spec_store._specs = {
+ 'nfs.foo': nfs_service,
+ 'ingress.nfs.foo': ispec
+ }
+ cephadm_module.spec_store.spec_created = {
+ 'nfs.foo': datetime_now(),
+ 'ingress.nfs.foo': datetime_now()
+ }
+
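+ # expected haproxy.cfg; with the PROXY protocol enabled the backend
+ # carries a 'default-server send-proxy-v2' line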
+ haproxy_txt = (
+ '# This file is generated by cephadm.\n'
+ 'global\n'
+ ' log 127.0.0.1 local2\n'
+ ' chroot /var/lib/haproxy\n'
+ ' pidfile /var/lib/haproxy/haproxy.pid\n'
+ ' maxconn 8000\n'
+ ' daemon\n'
+ ' stats socket /var/lib/haproxy/stats\n\n'
+ 'defaults\n'
+ ' mode tcp\n'
+ ' log global\n'
+ ' timeout queue 1m\n'
+ ' timeout connect 10s\n'
+ ' timeout client 1m\n'
+ ' timeout server 1m\n'
+ ' timeout check 10s\n'
+ ' maxconn 8000\n\n'
+ 'frontend stats\n'
+ ' mode http\n'
+ ' bind 192.168.122.100:9049\n'
+ ' bind 192.168.122.111:9049\n'
+ ' stats enable\n'
+ ' stats uri /stats\n'
+ ' stats refresh 10s\n'
+ ' stats auth admin:12345\n'
+ ' http-request use-service prometheus-exporter if { path /metrics }\n'
+ ' monitor-uri /health\n\n'
+ 'frontend frontend\n'
+ ' bind 192.168.122.100:2049\n'
+ ' default_backend backend\n\n'
+ 'backend backend\n'
+ ' mode tcp\n'
+ ' balance source\n'
+ ' hash-type consistent\n'
+ ' default-server send-proxy-v2\n'
+ ' server nfs.foo.0 192.168.122.111:12049\n'
+ )
+ haproxy_expected_conf = {
+ 'files': {'haproxy.cfg': haproxy_txt}
+ }
+
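+ # expected ganesha.conf; HAProxy_Hosts lists every usable address of the
+ # ingress hosts so ganesha will trust PROXY headers from them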
+ nfs_ganesha_txt = (
+ "# This file is generated by cephadm.\n"
+ 'NFS_CORE_PARAM {\n'
+ ' Enable_NLM = false;\n'
+ ' Enable_RQUOTA = false;\n'
+ ' Protocols = 4;\n'
+ ' NFS_Port = 2049;\n'
+ ' HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;\n'
+ '}\n'
+ '\n'
+ 'NFSv4 {\n'
+ ' Delegations = false;\n'
+ " RecoveryBackend = 'rados_cluster';\n"
+ ' Minor_Versions = 1, 2;\n'
+ '}\n'
+ '\n'
+ 'RADOS_KV {\n'
+ ' UserId = "nfs.foo.test.0.0";\n'
+ ' nodeid = "nfs.foo.None";\n'
+ ' pool = ".nfs";\n'
+ ' namespace = "foo";\n'
+ '}\n'
+ '\n'
+ 'RADOS_URLS {\n'
+ ' UserId = "nfs.foo.test.0.0";\n'
+ ' watch_url = '
+ '"rados://.nfs/foo/conf-nfs.foo";\n'
+ '}\n'
+ '\n'
+ 'RGW {\n'
+ ' cluster = "ceph";\n'
+ ' name = "client.nfs.foo.test.0.0-rgw";\n'
+ '}\n'
+ '\n'
+ "%url rados://.nfs/foo/conf-nfs.foo"
+ )
+ nfs_expected_conf = {
+ 'files': {'ganesha.conf': nfs_ganesha_txt},
+ 'config': '',
+ 'extra_args': ['-N', 'NIV_EVENT'],
+ 'keyring': (
+ '[client.nfs.foo.test.0.0]\n'
+ 'key = None\n'
+ ),
+ 'namespace': 'foo',
+ 'pool': '.nfs',
+ 'rgw': {
+ 'cluster': 'ceph',
+ 'keyring': (
+ '[client.nfs.foo.test.0.0-rgw]\n'
+ 'key = None\n'
+ ),
+ 'user': 'nfs.foo.test.0.0-rgw',
+ },
+ 'userid': 'nfs.foo.test.0.0',
+ }
+
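+ # two nfs daemons share rank 0; haproxy should only point at the one
+ # with the higher rank_generation (host1)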
+ nfs_daemons = [
+ DaemonDescription(
+ daemon_type='nfs',
+ daemon_id='foo.0.1.host1.qwerty',
+ hostname='host1',
+ rank=0,
+ rank_generation=1,
+ ports=[12049],
+ ),
+ DaemonDescription(
+ daemon_type='nfs',
+ daemon_id='foo.0.0.host2.abcdef',
+ hostname='host2',
+ rank=0,
+ rank_generation=0,
+ ports=[12049],
+ ),
+ ]
+ _get_daemons_by_service.return_value = nfs_daemons
+
+ ingress_svc = cephadm_module.cephadm_services['ingress']
+ nfs_svc = cephadm_module.cephadm_services['nfs']
+
+ # add host network info to one host to test the behavior of
+ # adding all known-good addresses of the host to the list.
+ cephadm_module.cache.update_host_networks('host1', {
+ # this one is additional
+ '10.10.2.0/24': {
+ 'eth1': ['10.10.2.20']
+ },
+ # this is redundant and will be skipped
+ '192.168.122.0/24': {
+ 'eth0': ['192.168.122.111']
+ },
+ # this is a link-local address and will be ignored
+ "fe80::/64": {
+ "veth0": [
+ "fe80::8cf5:25ff:fe1c:d963"
+ ],
+ "eth0": [
+ "fe80::c7b:cbff:fef6:7370"
+ ],
+ "eth1": [
+ "fe80::7201:25a7:390b:d9a7"
+ ]
+ },
+ })
+
+ haproxy_generated_conf, _ = ingress_svc.haproxy_generate_config(
+ CephadmDaemonDeploySpec(
+ host='host1',
+ daemon_id='ingress',
+ service_name=ispec.service_name(),
+ ),
+ )
+ assert haproxy_generated_conf == haproxy_expected_conf
+
+ nfs_generated_conf, _ = nfs_svc.generate_config(
+ CephadmDaemonDeploySpec(
+ host='test',
+ daemon_id='foo.test.0.0',
+ service_name=nfs_service.service_name(),
+ ),
+ )
+ assert nfs_generated_conf == nfs_expected_conf
+
class TestCephFsMirror:
@patch("cephadm.serve.CephadmServe._run_cephadm")
with with_service(cephadm_module, spec):
_run_cephadm.assert_called_with(
'test',
- 'jaeger-query.test',
- 'deploy',
- [
- '--name', 'jaeger-query.test',
- '--meta-json',
- ('{"service_name": "jaeger-query", "ports": [16686], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '16686'
-
- ],
- stdin=json.dumps(config),
- image=''
+ "jaeger-query.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'jaeger-query.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [16686],
+ },
+ "meta": {
+ 'service_name': 'jaeger-query',
+ 'ports': [16686],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": config,
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
"elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'}
with with_service(cephadm_module, es_spec):
_run_cephadm.assert_called_with(
- 'test',
- 'elasticsearch.test',
- 'deploy',
- [
- '--name', 'elasticsearch.test',
- '--meta-json',
- ('{"service_name": "elasticsearch", "ports": [9200], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '9200'
-
- ],
- stdin=json.dumps(es_config),
- image=''
+ "test",
+ "elasticsearch.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'elasticsearch.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [9200],
+ },
+ "meta": {
+ 'service_name': 'elasticsearch',
+ 'ports': [9200],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": es_config,
+ }),
)
with with_service(cephadm_module, collector_spec):
_run_cephadm.assert_called_with(
- 'test',
- 'jaeger-collector.test',
- 'deploy',
- [
- '--name', 'jaeger-collector.test',
- '--meta-json',
- ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '14250'
-
- ],
- stdin=json.dumps(collector_config),
- image=''
+ "test",
+ "jaeger-collector.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'jaeger-collector.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [14250],
+ },
+ "meta": {
+ 'service_name': 'jaeger-collector',
+ 'ports': [14250],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": collector_config,
+ }),
)
@patch("cephadm.serve.CephadmServe._run_cephadm")
with with_host(cephadm_module, 'test'):
with with_service(cephadm_module, collector_spec):
_run_cephadm.assert_called_with(
- 'test',
- 'jaeger-collector.test',
- 'deploy',
- [
- '--name', 'jaeger-collector.test',
- '--meta-json',
- ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '14250'
-
- ],
- stdin=json.dumps(collector_config),
- image=''
+ "test",
+ "jaeger-collector.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'jaeger-collector.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [14250],
+ },
+ "meta": {
+ 'service_name': 'jaeger-collector',
+ 'ports': [14250],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": collector_config,
+ }),
)
with with_service(cephadm_module, agent_spec):
_run_cephadm.assert_called_with(
- 'test',
- 'jaeger-agent.test',
- 'deploy',
- [
- '--name', 'jaeger-agent.test',
- '--meta-json',
- ('{"service_name": "jaeger-agent", "ports": [6799], "ip": null, "deployed_by": [], "rank": null, '
- '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'),
- '--config-json', '-',
- '--tcp-ports', '6799'
-
- ],
- stdin=json.dumps(agent_config),
- image=''
+ "test",
+ "jaeger-agent.test",
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps({
+ "fsid": "fsid",
+ "name": 'jaeger-agent.test',
+ "image": '',
+ "deploy_arguments": [],
+ "params": {
+ 'tcp_ports': [6799],
+ },
+ "meta": {
+ 'service_name': 'jaeger-agent',
+ 'ports': [6799],
+ 'ip': None,
+ 'deployed_by': [],
+ 'rank': None,
+ 'rank_generation': None,
+ 'extra_container_args': None,
+ 'extra_entrypoint_args': None,
+ },
+ "config_blobs": agent_config,
+ }),
)