ceph/src/pybind/mgr/cephadm/services/ingress.py (ceph pacific 16.2.5)

import ipaddress
import logging
import random
import string
from typing import List, Dict, Any, Tuple, cast, Optional

from ceph.deployment.service_spec import IngressSpec
from cephadm.utils import resolve_ip
from orchestrator import OrchestratorError
from cephadm.services.cephadmservice import CephadmDaemonDeploySpec, CephService

logger = logging.getLogger(__name__)

class IngressService(CephService):
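    """
    Ingress service for a backend service such as RGW or NFS: an haproxy
    daemon on each placement host load-balances the backend daemons, and a
    keepalived daemon on each of those hosts keeps the spec's virtual IP
    available by failing it over between hosts (VRRP).
    """
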
    TYPE = 'ingress'

    def primary_daemon_type(self) -> str:
        return 'haproxy'

    def per_host_daemon_type(self) -> Optional[str]:
        return 'keepalived'

    def prepare_create(
            self,
            daemon_spec: CephadmDaemonDeploySpec,
    ) -> CephadmDaemonDeploySpec:
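        """Dispatch to the haproxy- or keepalived-specific prepare_create."""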
        if daemon_spec.daemon_type == 'haproxy':
            return self.haproxy_prepare_create(daemon_spec)
        if daemon_spec.daemon_type == 'keepalived':
            return self.keepalived_prepare_create(daemon_spec)
        assert False, "unexpected daemon type"

    def generate_config(
            self,
            daemon_spec: CephadmDaemonDeploySpec
    ) -> Tuple[Dict[str, Any], List[str]]:
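        """Return the config payload and dependency list for either daemon type."""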
        if daemon_spec.daemon_type == 'haproxy':
            return self.haproxy_generate_config(daemon_spec)
        if daemon_spec.daemon_type == 'keepalived':
            return self.keepalived_generate_config(daemon_spec)
        assert False, "unexpected daemon type"

    def haproxy_prepare_create(
            self,
            daemon_spec: CephadmDaemonDeploySpec,
    ) -> CephadmDaemonDeploySpec:
        assert daemon_spec.daemon_type == 'haproxy'

        daemon_id = daemon_spec.daemon_id
        host = daemon_spec.host
        spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)

        logger.debug('prepare_create haproxy.%s on host %s with spec %s' % (
            daemon_id, host, spec))

        daemon_spec.final_config, daemon_spec.deps = self.haproxy_generate_config(daemon_spec)

        return daemon_spec

    def haproxy_generate_config(
            self,
            daemon_spec: CephadmDaemonDeploySpec,
    ) -> Tuple[Dict[str, Any], List[str]]:
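        """
        Render haproxy.cfg for this daemon from the Jinja2 template and
        return it, along with the sorted names of the backend daemons it
        proxies (which become this daemon's dependencies).
        """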
        spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
        assert spec.backend_service
        if spec.backend_service not in self.mgr.spec_store:
            raise RuntimeError(f'{spec.service_name()} backend service {spec.backend_service} does not exist')
        backend_spec = self.mgr.spec_store[spec.backend_service].spec
        daemons = self.mgr.cache.get_daemons_by_service(spec.backend_service)
        deps = [d.name() for d in daemons]
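        # the backend daemon names collected above are recorded as deps: when
        # the set of backend daemons changes, cephadm reconfigures this haproxy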

        # generate password?
        pw_key = f'{spec.service_name()}/monitor_password'
        password = self.mgr.get_store(pw_key)
        if password is None:
            if not spec.monitor_password:
                password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
                self.mgr.set_store(pw_key, password)
        else:
            if spec.monitor_password:
                self.mgr.set_store(pw_key, None)
        if spec.monitor_password:
            password = spec.monitor_password

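        # NFS backends are proxied in TCP mode, with backend servers addressed
        # by rank so that an NFS daemon redeployed on another host keeps its
        # slot; other backends (e.g. RGW) are balanced in HTTP mode.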
        if backend_spec.service_type == 'nfs':
            mode = 'tcp'
            by_rank = {d.rank: d for d in daemons if d.rank is not None}
            servers = []

            # try to establish how many ranks we *should* have
            num_ranks = backend_spec.placement.count
            if not num_ranks:
                num_ranks = 1 + max(by_rank.keys())

            for rank in range(num_ranks):
                if rank in by_rank:
                    d = by_rank[rank]
                    assert d.ports
                    servers.append({
                        'name': f"{spec.backend_service}.{rank}",
                        'ip': d.ip or resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))),
                        'port': d.ports[0],
                    })
                else:
                    # offline/missing server; leave rank in place
                    servers.append({
                        'name': f"{spec.backend_service}.{rank}",
                        'ip': '0.0.0.0',
                        'port': 0,
                    })
        else:
            mode = 'http'
            servers = [
                {
                    'name': d.name(),
                    'ip': d.ip or resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))),
                    'port': d.ports[0],
                } for d in daemons if d.ports
            ]

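        # frontend_port is where haproxy accepts client traffic; monitor_port
        # serves the status/health endpoint used by the keepalived check
        # below.  Ports already assigned to this daemon take precedence over
        # the spec defaults.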
        haproxy_conf = self.mgr.template.render(
            'services/ingress/haproxy.cfg.j2',
            {
                'spec': spec,
                'mode': mode,
                'servers': servers,
                'user': spec.monitor_user or 'admin',
                'password': password,
                'ip': daemon_spec.ip or '*',
                'frontend_port': daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port,
                'monitor_port': daemon_spec.ports[1] if daemon_spec.ports else spec.monitor_port,
            }
        )
        config_files = {
            'files': {
                "haproxy.cfg": haproxy_conf,
            }
        }
        if spec.ssl_cert:
            ssl_cert = spec.ssl_cert
            if isinstance(ssl_cert, list):
                ssl_cert = '\n'.join(ssl_cert)
            config_files['files']['haproxy.pem'] = ssl_cert

        return config_files, sorted(deps)

    def keepalived_prepare_create(
            self,
            daemon_spec: CephadmDaemonDeploySpec,
    ) -> CephadmDaemonDeploySpec:
        assert daemon_spec.daemon_type == 'keepalived'

        daemon_id = daemon_spec.daemon_id
        host = daemon_spec.host
        spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)

        logger.debug('prepare_create keepalived.%s on host %s with spec %s' % (
            daemon_id, host, spec))

        daemon_spec.final_config, daemon_spec.deps = self.keepalived_generate_config(daemon_spec)

        return daemon_spec

    def keepalived_generate_config(
            self,
            daemon_spec: CephadmDaemonDeploySpec,
    ) -> Tuple[Dict[str, Any], List[str]]:
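        """
        Render keepalived.conf for this host: find the interface whose subnet
        contains the virtual IP, list the other ingress hosts as VRRP peers,
        and health-check the local haproxy monitor endpoint.
        """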
        spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
        assert spec.backend_service

        # generate password?
        pw_key = f'{spec.service_name()}/keepalived_password'
        password = self.mgr.get_store(pw_key)
        if password is None:
            if not spec.keepalived_password:
                password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
                self.mgr.set_store(pw_key, password)
        else:
            if spec.keepalived_password:
                self.mgr.set_store(pw_key, None)
        if spec.keepalived_password:
            password = spec.keepalived_password

        daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
        deps = sorted([d.name() for d in daemons if d.daemon_type == 'haproxy'])
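        # only the haproxy daemons of this ingress service are deps here;
        # keepalived is reconfigured when that set changes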

        host = daemon_spec.host
        hosts = sorted(list(set([str(d.hostname) for d in daemons])))

        # interface
        bare_ip = str(spec.virtual_ip).split('/')[0]
        interface = None
        for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items():
            if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet):
                interface = list(ifaces.keys())[0]
                logger.info(
                    f'{bare_ip} is in {subnet} on {host} interface {interface}'
                )
                break
        if not interface and spec.networks:
            # hmm, try spec.networks
            for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items():
                if subnet in spec.networks:
                    interface = list(ifaces.keys())[0]
                    logger.info(
                        f'{spec.virtual_ip} will be configured on {host} interface '
                        f'{interface} (which has guiding subnet {subnet})'
                    )
                    break
        if not interface:
            raise OrchestratorError(
                f"Unable to identify interface for {spec.virtual_ip} on {host}"
            )

        # script to monitor health
        script = '/usr/bin/false'
        for d in daemons:
            if d.hostname == host:
                if d.daemon_type == 'haproxy':
                    assert d.ports
                    port = d.ports[1]   # monitoring port
                    script = f'/usr/bin/curl http://{d.ip or "localhost"}:{port}/health'
        assert script

        # set state. first host in placement is master, all others are backups
        state = 'BACKUP'
        if hosts[0] == host:
            state = 'MASTER'

        # remove the host this daemon is being deployed on from the hosts
        # list used for other_ips in the conf file, and convert the rest to IPs
        if host in hosts:
            hosts.remove(host)
        other_ips = [resolve_ip(self.mgr.inventory.get_addr(h)) for h in hosts]

        keepalived_conf = self.mgr.template.render(
            'services/ingress/keepalived.conf.j2',
            {
                'spec': spec,
                'script': script,
                'password': password,
                'interface': interface,
                'state': state,
                'other_ips': other_ips,
                'host_ip': resolve_ip(self.mgr.inventory.get_addr(host)),
            }
        )

        config_file = {
            'files': {
                "keepalived.conf": keepalived_conf,
            }
        }

        return config_file, deps
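

# Illustrative only: the rough shape of a service spec that would be handled
# by this class.  The field names are the IngressSpec/placement attributes
# referenced above; the service id, hosts, and addresses are made-up examples.
#
#   service_type: ingress
#   service_id: rgw.foo
#   placement:
#     hosts:
#       - host1
#       - host2
#   spec:
#     backend_service: rgw.foo
#     virtual_ip: 192.168.1.100/24
#     frontend_port: 8080
#     monitor_port: 1967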