[ceph.git] / ceph / src / python-common / ceph / deployment / service_spec.py (tag: ceph 16.2.7)
1 import fnmatch
2 import re
3 from collections import OrderedDict
4 from functools import wraps
5 from ipaddress import ip_network, ip_address
6 from typing import Optional, Dict, Any, List, Union, Callable, Iterable, Type, TypeVar, cast, \
7 NamedTuple, Mapping
8
9 import yaml
10
11 from ceph.deployment.hostspec import HostSpec, SpecValidationError
12 from ceph.deployment.utils import unwrap_ipv6
13
14 ServiceSpecT = TypeVar('ServiceSpecT', bound='ServiceSpec')
15 FuncT = TypeVar('FuncT', bound=Callable)
16
17
18 def assert_valid_host(name: str) -> None:
19 p = re.compile('^[a-zA-Z0-9-]+$')
20 try:
21 assert len(name) <= 250, 'name is too long (max 250 chars)'
22 for part in name.split('.'):
23 assert len(part) > 0, '.-delimited name component must not be empty'
24 assert len(part) <= 63, '.-delimited name component must not be more than 63 chars'
25 assert p.match(part), 'name component must include only a-z, A-Z, 0-9, and -'
26 except AssertionError as e:
27 raise SpecValidationError(str(e))
28
29
30 def handle_type_error(method: FuncT) -> FuncT:
31 @wraps(method)
32 def inner(cls: Any, *args: Any, **kwargs: Any) -> Any:
33 try:
34 return method(cls, *args, **kwargs)
35 except (TypeError, AttributeError) as e:
36 error_msg = '{}: {}'.format(cls.__name__, e)
37 raise SpecValidationError(error_msg)
38 return cast(FuncT, inner)
39
40
41 class HostPlacementSpec(NamedTuple):
42 hostname: str
43 network: str
44 name: str
45
46 def __str__(self) -> str:
47 res = ''
48 res += self.hostname
49 if self.network:
50 res += ':' + self.network
51 if self.name:
52 res += '=' + self.name
53 return res
54
55 @classmethod
56 @handle_type_error
57 def from_json(cls, data: Union[dict, str]) -> 'HostPlacementSpec':
58 if isinstance(data, str):
59 return cls.parse(data)
60 return cls(**data)
61
62 def to_json(self) -> str:
63 return str(self)
64
65 @classmethod
66 def parse(cls, host, require_network=True):
67 # type: (str, bool) -> HostPlacementSpec
68 """
69 Split host into host, network, and (optional) daemon name parts. The network
70 part can be an IP, CIDR, or ceph addrvec like '[v2:1.2.3.4:3300,v1:1.2.3.4:6789]'.
71 e.g.,
72 "myhost"
73 "myhost=name"
74 "myhost:1.2.3.4"
75 "myhost:1.2.3.4=name"
76 "myhost:1.2.3.0/24"
77 "myhost:1.2.3.0/24=name"
78 "myhost:[v2:1.2.3.4:3000]=name"
79 "myhost:[v2:1.2.3.4:3000,v1:1.2.3.4:6789]=name"
80 """
81 # Matches from start to : or = or until end of string
82 host_re = r'^(.*?)(:|=|$)'
83 # Matches from : to = or until end of string
84 ip_re = r':(.*?)(=|$)'
85 # Matches from = to end of string
86 name_re = r'=(.*?)$'
87
88 # assign defaults
89 host_spec = cls('', '', '')
90
91 match_host = re.search(host_re, host)
92 if match_host:
93 host_spec = host_spec._replace(hostname=match_host.group(1))
94
95 name_match = re.search(name_re, host)
96 if name_match:
97 host_spec = host_spec._replace(name=name_match.group(1))
98
99 ip_match = re.search(ip_re, host)
100 if ip_match:
101 host_spec = host_spec._replace(network=ip_match.group(1))
102
103 if not require_network:
104 return host_spec
105
106 networks = list() # type: List[str]
107 network = host_spec.network
108 # in case we have [v2:1.2.3.4:3000,v1:1.2.3.4:6478]
109 if ',' in network:
110 networks = [x for x in network.split(',')]
111 else:
112 if network != '':
113 networks.append(network)
114
115 for network in networks:
116 # only if we have versioned network configs
117 if network.startswith('v') or network.startswith('[v'):
118 # if this is ipv6 we can't simply split on ':', so do a split
119 # once and an rsplit once to leave us with just the ipv6 addr
120 network = network.split(':', 1)[1]
121 network = network.rsplit(':', 1)[0]
122 try:
123 # if subnets are defined, also verify the validity
124 if '/' in network:
125 ip_network(network)
126 else:
127 ip_address(unwrap_ipv6(network))
128 except ValueError as e:
129 # logging?
130 raise e
131 host_spec.validate()
132 return host_spec
133
134 def validate(self) -> None:
135 assert_valid_host(self.hostname)
136
137
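# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# How HostPlacementSpec.parse() splits a "host:network=name" token into its parts.
# The hostname, subnet and daemon name below are made-up example values.
def _example_host_placement_parse() -> None:
    spec = HostPlacementSpec.parse('myhost:1.2.3.0/24=mon.a')
    assert spec.hostname == 'myhost'
    assert spec.network == '1.2.3.0/24'
    assert spec.name == 'mon.a'
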
138 class PlacementSpec(object):
139 """
140 For APIs that need to specify a host subset
141 """
142
143 def __init__(self,
144 label=None, # type: Optional[str]
145 hosts=None, # type: Union[List[str],List[HostPlacementSpec], None]
146 count=None, # type: Optional[int]
147 count_per_host=None, # type: Optional[int]
148 host_pattern=None, # type: Optional[str]
149 ):
150 # type: (...) -> None
151 self.label = label
152 self.hosts = [] # type: List[HostPlacementSpec]
153
154 if hosts:
155 self.set_hosts(hosts)
156
157 self.count = count # type: Optional[int]
158 self.count_per_host = count_per_host # type: Optional[int]
159
160 #: fnmatch patterns to select hosts. Can also be a single host.
161 self.host_pattern = host_pattern # type: Optional[str]
162
163 self.validate()
164
165 def is_empty(self) -> bool:
166 return (
167 self.label is None
168 and not self.hosts
169 and not self.host_pattern
170 and self.count is None
171 and self.count_per_host is None
172 )
173
174 def __eq__(self, other: Any) -> bool:
175 if isinstance(other, PlacementSpec):
176 return self.label == other.label \
177 and self.hosts == other.hosts \
178 and self.count == other.count \
179 and self.host_pattern == other.host_pattern \
180 and self.count_per_host == other.count_per_host
181 return NotImplemented
182
183 def set_hosts(self, hosts: Union[List[str], List[HostPlacementSpec]]) -> None:
184 # To backpopulate the .hosts attribute when using labels or count
185 # in the orchestrator backend.
186 if all([isinstance(host, HostPlacementSpec) for host in hosts]):
187 self.hosts = hosts # type: ignore
188 else:
189 self.hosts = [HostPlacementSpec.parse(x, require_network=False) # type: ignore
190 for x in hosts if x]
191
192 # deprecated
193 def filter_matching_hosts(self, _get_hosts_func: Callable) -> List[str]:
194 return self.filter_matching_hostspecs(_get_hosts_func(as_hostspec=True))
195
196 def filter_matching_hostspecs(self, hostspecs: Iterable[HostSpec]) -> List[str]:
197 if self.hosts:
198 all_hosts = [hs.hostname for hs in hostspecs]
199 return [h.hostname for h in self.hosts if h.hostname in all_hosts]
200 if self.label:
201 return [hs.hostname for hs in hostspecs if self.label in hs.labels]
202 all_hosts = [hs.hostname for hs in hostspecs]
203 if self.host_pattern:
204 return fnmatch.filter(all_hosts, self.host_pattern)
205 return all_hosts
206
207 def get_target_count(self, hostspecs: Iterable[HostSpec]) -> int:
208 if self.count:
209 return self.count
210 return len(self.filter_matching_hostspecs(hostspecs)) * (self.count_per_host or 1)
211
212 def pretty_str(self) -> str:
213 """
214 >>> #doctest: +SKIP
215 ... ps = PlacementSpec(...) # For all placement specs:
216 ... PlacementSpec.from_string(ps.pretty_str()) == ps
217 """
218 kv = []
219 if self.hosts:
220 kv.append(';'.join([str(h) for h in self.hosts]))
221 if self.count:
222 kv.append('count:%d' % self.count)
223 if self.count_per_host:
224 kv.append('count-per-host:%d' % self.count_per_host)
225 if self.label:
226 kv.append('label:%s' % self.label)
227 if self.host_pattern:
228 kv.append(self.host_pattern)
229 return ';'.join(kv)
230
231 def __repr__(self) -> str:
232 kv = []
233 if self.count:
234 kv.append('count=%d' % self.count)
235 if self.count_per_host:
236 kv.append('count_per_host=%d' % self.count_per_host)
237 if self.label:
238 kv.append('label=%s' % repr(self.label))
239 if self.hosts:
240 kv.append('hosts={!r}'.format(self.hosts))
241 if self.host_pattern:
242 kv.append('host_pattern={!r}'.format(self.host_pattern))
243 return "PlacementSpec(%s)" % ', '.join(kv)
244
245 @classmethod
246 @handle_type_error
247 def from_json(cls, data: dict) -> 'PlacementSpec':
248 c = data.copy()
249 hosts = c.get('hosts', [])
250 if hosts:
251 c['hosts'] = []
252 for host in hosts:
253 c['hosts'].append(HostPlacementSpec.from_json(host))
254 _cls = cls(**c)
255 _cls.validate()
256 return _cls
257
258 def to_json(self) -> dict:
259 r: Dict[str, Any] = {}
260 if self.label:
261 r['label'] = self.label
262 if self.hosts:
263 r['hosts'] = [host.to_json() for host in self.hosts]
264 if self.count:
265 r['count'] = self.count
266 if self.count_per_host:
267 r['count_per_host'] = self.count_per_host
268 if self.host_pattern:
269 r['host_pattern'] = self.host_pattern
270 return r
271
272 def validate(self) -> None:
273 if self.hosts and self.label:
274 # TODO: a less generic Exception
275 raise SpecValidationError('Host and label are mutually exclusive')
276 if self.count is not None and self.count <= 0:
277 raise SpecValidationError("num/count must be >= 1")
278 if self.count_per_host is not None and self.count_per_host < 1:
279 raise SpecValidationError("count-per-host must be >= 1")
280 if self.count_per_host is not None and not (
281 self.label
282 or self.hosts
283 or self.host_pattern
284 ):
285 raise SpecValidationError(
286 "count-per-host must be combined with label or hosts or host_pattern"
287 )
288 if self.count is not None and self.count_per_host is not None:
289 raise SpecValidationError("cannot combine count and count-per-host")
290 if (
291 self.count_per_host is not None
292 and self.hosts
293 and any([hs.network or hs.name for hs in self.hosts])
294 ):
295 raise SpecValidationError(
296 "count-per-host cannot be combined with explicit placement that uses names or networks"
297 )
298 if self.host_pattern and self.hosts:
299 raise SpecValidationError('cannot combine host patterns and hosts')
300 for h in self.hosts:
301 h.validate()
302
303 @classmethod
304 def from_string(cls, arg):
305 # type: (Optional[str]) -> PlacementSpec
306 """
307 A single integer is parsed as a count:
308 >>> PlacementSpec.from_string('3')
309 PlacementSpec(count=3)
310
311 A list of names is parsed as host specifications:
312 >>> PlacementSpec.from_string('host1 host2')
313 PlacementSpec(hosts=[HostPlacementSpec(hostname='host1', network='', name=''), HostPlacemen\
314 tSpec(hostname='host2', network='', name='')])
315
316 You can also prefix the hosts with a count as follows:
317 >>> PlacementSpec.from_string('2 host1 host2')
318 PlacementSpec(count=2, hosts=[HostPlacementSpec(hostname='host1', network='', name=''), Hos\
319 tPlacementSpec(hostname='host2', network='', name='')])
320
321 You can specify labels using `label:<label>`
322 >>> PlacementSpec.from_string('label:mon')
323 PlacementSpec(label='mon')
324
325 Labels also support a count:
326 >>> PlacementSpec.from_string('3 label:mon')
327 PlacementSpec(count=3, label='mon')
328
329 fnmatch is also supported:
330 >>> PlacementSpec.from_string('data[1-3]')
331 PlacementSpec(host_pattern='data[1-3]')
332
333 >>> PlacementSpec.from_string(None)
334 PlacementSpec()
335 """
336 if arg is None or not arg:
337 strings = []
338 elif isinstance(arg, str):
339 if ' ' in arg:
340 strings = arg.split(' ')
341 elif ';' in arg:
342 strings = arg.split(';')
343 elif ',' in arg and '[' not in arg:
344 # FIXME: this isn't quite right. We want to avoid breaking a
345 # list of mons with addrvecs, so we allow splitting on ','
346 # most of the time, except when addrvecs are used. Maybe
347 # ok?
348 strings = arg.split(',')
349 else:
350 strings = [arg]
351 else:
352 raise SpecValidationError('invalid placement %s' % arg)
353
354 count = None
355 count_per_host = None
356 if strings:
357 try:
358 count = int(strings[0])
359 strings = strings[1:]
360 except ValueError:
361 pass
362 for s in strings:
363 if s.startswith('count:'):
364 try:
365 count = int(s[len('count:'):])
366 strings.remove(s)
367 break
368 except ValueError:
369 pass
370 for s in strings:
371 if s.startswith('count-per-host:'):
372 try:
373 count_per_host = int(s[len('count-per-host:'):])
374 strings.remove(s)
375 break
376 except ValueError:
377 pass
378
379 advanced_hostspecs = [h for h in strings if
380 (':' in h or '=' in h or not any(c in '[]?*:=' for c in h)) and
381 'label:' not in h]
382 for a_h in advanced_hostspecs:
383 strings.remove(a_h)
384
385 labels = [x for x in strings if 'label:' in x]
386 if len(labels) > 1:
387 raise SpecValidationError('more than one label provided: {}'.format(labels))
388 for l in labels:
389 strings.remove(l)
390 label = labels[0][6:] if labels else None
391
392 host_patterns = strings
393 if len(host_patterns) > 1:
394 raise SpecValidationError(
395 'more than one host pattern provided: {}'.format(host_patterns))
396
397 ps = PlacementSpec(count=count,
398 count_per_host=count_per_host,
399 hosts=advanced_hostspecs,
400 label=label,
401 host_pattern=host_patterns[0] if host_patterns else None)
402 return ps
403
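# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# How a PlacementSpec resolves to a target daemon count: with no explicit `count`,
# the matching hosts are multiplied by `count_per_host`. Hostnames and labels are
# made-up; this assumes HostSpec(hostname, labels=...) from ceph.deployment.hostspec.
def _example_placement_target_count() -> int:
    hosts = [
        HostSpec('node1', labels=['mon']),
        HostSpec('node2'),
        HostSpec('node3', labels=['mon']),
    ]
    spec = PlacementSpec(label='mon', count_per_host=2)
    # two hosts carry the 'mon' label, times count_per_host 2 -> 4
    return spec.get_target_count(hosts)
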
404
405 class ServiceSpec(object):
406 """
407 Details of service creation.
408
409 Request to the orchestrator for a cluster of daemons
410 such as MDS, RGW, iscsi gateway, MONs, MGRs, Prometheus
411
412 This structure is supposed to be enough information to
413 start the services.
414 """
415 KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi mds mgr mon nfs ' \
416 'node-exporter osd prometheus rbd-mirror rgw ' \
417 'container cephadm-exporter ingress cephfs-mirror'.split()
418 REQUIRES_SERVICE_ID = 'iscsi mds nfs osd rgw container ingress '.split()
419 MANAGED_CONFIG_OPTIONS = [
420 'mds_join_fs',
421 ]
422
423 @classmethod
424 def _cls(cls: Type[ServiceSpecT], service_type: str) -> Type[ServiceSpecT]:
425 from ceph.deployment.drive_group import DriveGroupSpec
426
427 ret = {
428 'rgw': RGWSpec,
429 'nfs': NFSServiceSpec,
430 'osd': DriveGroupSpec,
431 'iscsi': IscsiServiceSpec,
432 'alertmanager': AlertManagerSpec,
433 'ingress': IngressSpec,
434 'container': CustomContainerSpec,
435 'grafana': MonitoringSpec,
436 'node-exporter': MonitoringSpec,
437 'prometheus': MonitoringSpec,
438 }.get(service_type, cls)
439 if ret == ServiceSpec and not service_type:
440 raise SpecValidationError('Spec needs a "service_type" key.')
441 return ret
442
443 def __new__(cls: Type[ServiceSpecT], *args: Any, **kwargs: Any) -> ServiceSpecT:
444 """
445 Some Python foo to make sure we don't have an object
446 like `ServiceSpec('rgw')` of type `ServiceSpec`. Now we have:
447
448 >>> type(ServiceSpec('rgw')) == type(RGWSpec('rgw'))
449 True
450
451 """
452 if cls != ServiceSpec:
453 return object.__new__(cls)
454 service_type = kwargs.get('service_type', args[0] if args else None)
455 sub_cls: Any = cls._cls(service_type)
456 return object.__new__(sub_cls)
457
458 def __init__(self,
459 service_type: str,
460 service_id: Optional[str] = None,
461 placement: Optional[PlacementSpec] = None,
462 count: Optional[int] = None,
463 config: Optional[Dict[str, str]] = None,
464 unmanaged: bool = False,
465 preview_only: bool = False,
466 networks: Optional[List[str]] = None,
467 ):
468
469 #: See :ref:`orchestrator-cli-placement-spec`.
470 self.placement = PlacementSpec() if placement is None else placement # type: PlacementSpec
471
472 assert service_type in ServiceSpec.KNOWN_SERVICE_TYPES, service_type
473 #: The type of the service. Needs to be either a Ceph
474 #: service (``mon``, ``crash``, ``mds``, ``mgr``, ``osd`` or
475 #: ``rbd-mirror``), a gateway (``nfs`` or ``rgw``), part of the
476 #: monitoring stack (``alertmanager``, ``grafana``, ``node-exporter`` or
477 #: ``prometheus``) or (``container``) for custom containers.
478 self.service_type = service_type
479
480 #: The name of the service. Required for ``iscsi``, ``mds``, ``nfs``, ``osd``, ``rgw``,
481 #: ``container``, ``ingress``
482 self.service_id = None
483
484 if self.service_type in self.REQUIRES_SERVICE_ID:
485 self.service_id = service_id
486
487 #: If set to ``true``, the orchestrator will not deploy or remove
488 #: any daemon associated with this service. Placement and all other properties
489 #: will be ignored. This is useful if you temporarily do not want this service
490 #: to be managed. For cephadm, see :ref:`cephadm-spec-unmanaged`
491 self.unmanaged = unmanaged
492 self.preview_only = preview_only
493
494 #: A list of network identities instructing the daemons to only bind
495 #: on the particular networks in that list. In case the cluster is distributed
496 #: across multiple networks, you can add multiple networks. See
497 #: :ref:`cephadm-monitoring-networks-ports`,
498 #: :ref:`cephadm-rgw-networks` and :ref:`cephadm-mgr-networks`.
499 self.networks: List[str] = networks or []
500
501 self.config: Optional[Dict[str, str]] = None
502 if config:
503 self.config = {k.replace(' ', '_'): v for k, v in config.items()}
504
505 @classmethod
506 @handle_type_error
507 def from_json(cls: Type[ServiceSpecT], json_spec: Dict) -> ServiceSpecT:
508 """
509 Initialize 'ServiceSpec' object data from a json structure
510
511 There are two valid styles for service specs:
512
513 the "old" style:
514
515 .. code:: yaml
516
517 service_type: nfs
518 service_id: foo
519 pool: mypool
520 namespace: myns
521
522 and the "new" style:
523
524 .. code:: yaml
525
526 service_type: nfs
527 service_id: foo
528 config:
529 some_option: the_value
530 networks: [10.10.0.0/16]
531 spec:
532 pool: mypool
533 namespace: myns
534
535 In https://tracker.ceph.com/issues/45321 we decided that we'd like to
536 prefer the new style as it is more readable and provides a better
537 understanding of what fields are special for a given service type.
538
539 Note: we'll need to stay compatible with both versions for
540 the next two major releases (octopus, pacific).
541
542 :param json_spec: A valid dict with ServiceSpec
543
544 :meta private:
545 """
546
547 if not isinstance(json_spec, dict):
548 raise SpecValidationError(
549 f'Service Spec is not a (JSON or YAML) object; got "{str(json_spec)}"')
550
551 json_spec = cls.normalize_json(json_spec)
552
553 c = json_spec.copy()
554
555 # kludge to make `from_json` compatible with `Orchestrator.describe_service`
556 # Open question: Remove `service_id` from to_json?
557 if c.get('service_name', ''):
558 service_type_id = c['service_name'].split('.', 1)
559
560 if not c.get('service_type', ''):
561 c['service_type'] = service_type_id[0]
562 if not c.get('service_id', '') and len(service_type_id) > 1:
563 c['service_id'] = service_type_id[1]
564 del c['service_name']
565
566 service_type = c.get('service_type', '')
567 _cls = cls._cls(service_type)
568
569 if 'status' in c:
570 del c['status'] # kludge to make us compatible with `ServiceDescription.to_json()`
571
572 return _cls._from_json_impl(c) # type: ignore
573
574 @staticmethod
575 def normalize_json(json_spec: dict) -> dict:
576 networks = json_spec.get('networks')
577 if networks is None:
578 return json_spec
579 if isinstance(networks, list):
580 return json_spec
581 if not isinstance(networks, str):
582 raise SpecValidationError(f'Networks ({networks}) must be a string or list of strings')
583 json_spec['networks'] = [networks]
584 return json_spec
585
586 @classmethod
587 def _from_json_impl(cls: Type[ServiceSpecT], json_spec: dict) -> ServiceSpecT:
588 args = {} # type: Dict[str, Any]
589 for k, v in json_spec.items():
590 if k == 'placement':
591 v = PlacementSpec.from_json(v)
592 if k == 'spec':
593 args.update(v)
594 continue
595 args.update({k: v})
596 _cls = cls(**args)
597 _cls.validate()
598 return _cls
599
600 def service_name(self) -> str:
601 n = self.service_type
602 if self.service_id:
603 n += '.' + self.service_id
604 return n
605
606 def get_port_start(self) -> List[int]:
607 # If defined, we will allocate and number ports starting at this
608 # point.
609 return []
610
611 def get_virtual_ip(self) -> Optional[str]:
612 return None
613
614 def to_json(self):
615 # type: () -> OrderedDict[str, Any]
616 ret: OrderedDict[str, Any] = OrderedDict()
617 ret['service_type'] = self.service_type
618 if self.service_id:
619 ret['service_id'] = self.service_id
620 ret['service_name'] = self.service_name()
621 ret['placement'] = self.placement.to_json()
622 if self.unmanaged:
623 ret['unmanaged'] = self.unmanaged
624 if self.networks:
625 ret['networks'] = self.networks
626
627 c = {}
628 for key, val in sorted(self.__dict__.items(), key=lambda tpl: tpl[0]):
629 if key in ret:
630 continue
631 if hasattr(val, 'to_json'):
632 val = val.to_json()
633 if val:
634 c[key] = val
635 if c:
636 ret['spec'] = c
637 return ret
638
639 def validate(self) -> None:
640 if not self.service_type:
641 raise SpecValidationError('Cannot add Service: type required')
642
643 if self.service_type in self.REQUIRES_SERVICE_ID:
644 if not self.service_id:
645 raise SpecValidationError('Cannot add Service: id required')
646 if not re.match('^[a-zA-Z0-9_.-]+$', self.service_id):
647 raise SpecValidationError('Service id contains invalid characters, '
648 'only [a-zA-Z0-9_.-] allowed')
649 elif self.service_id:
650 raise SpecValidationError(
651 f'Service of type \'{self.service_type}\' should not contain a service id')
652
653 if self.placement is not None:
654 self.placement.validate()
655 if self.config:
656 for k, v in self.config.items():
657 if k in self.MANAGED_CONFIG_OPTIONS:
658 raise SpecValidationError(
659 f'Cannot set config option {k} in spec: it is managed by cephadm'
660 )
661 for network in self.networks or []:
662 try:
663 ip_network(network)
664 except ValueError as e:
665 raise SpecValidationError(
666 f'Cannot parse network {network}: {e}'
667 )
668
669 def __repr__(self) -> str:
670 return "{}({!r})".format(self.__class__.__name__, self.__dict__)
671
672 def __eq__(self, other: Any) -> bool:
673 return (self.__class__ == other.__class__
674 and
675 self.__dict__ == other.__dict__)
676
677 def one_line_str(self) -> str:
678 return '<{} for service_name={}>'.format(self.__class__.__name__, self.service_name())
679
680 @staticmethod
681 def yaml_representer(dumper: 'yaml.SafeDumper', data: 'ServiceSpec') -> Any:
682 return dumper.represent_dict(cast(Mapping, data.to_json().items()))
683
684
685 yaml.add_representer(ServiceSpec, ServiceSpec.yaml_representer)
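
# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# Parsing a "new style" service spec, as described in ServiceSpec.from_json():
# top-level keys go to the base class, the nested `spec` section carries the
# service-type specific fields. The service id, count and port are made-up values.
def _example_service_spec_from_yaml() -> ServiceSpec:
    spec_text = """
service_type: nfs
service_id: foo
placement:
  count: 2
spec:
  port: 2049
"""
    return ServiceSpec.from_json(yaml.safe_load(spec_text))
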
686
687
688 class NFSServiceSpec(ServiceSpec):
689 def __init__(self,
690 service_type: str = 'nfs',
691 service_id: Optional[str] = None,
692 placement: Optional[PlacementSpec] = None,
693 unmanaged: bool = False,
694 preview_only: bool = False,
695 config: Optional[Dict[str, str]] = None,
696 networks: Optional[List[str]] = None,
697 port: Optional[int] = None,
698 ):
699 assert service_type == 'nfs'
700 super(NFSServiceSpec, self).__init__(
701 'nfs', service_id=service_id,
702 placement=placement, unmanaged=unmanaged, preview_only=preview_only,
703 config=config, networks=networks)
704
705 self.port = port
706
707 def get_port_start(self) -> List[int]:
708 if self.port:
709 return [self.port]
710 return []
711
712 def rados_config_name(self):
713 # type: () -> str
714 return 'conf-' + self.service_name()
715
716
717 yaml.add_representer(NFSServiceSpec, ServiceSpec.yaml_representer)
718
719
720 class RGWSpec(ServiceSpec):
721 """
722 Settings to configure a (multisite) Ceph RGW
723
724 .. code-block:: yaml
725
726 service_type: rgw
727 service_id: myrealm.myzone
728 spec:
729 rgw_realm: myrealm
730 rgw_zone: myzone
731 ssl: true
732 rgw_frontend_port: 1234
733 rgw_frontend_type: beast
734 rgw_frontend_ssl_certificate: ...
735
736 See also: :ref:`orchestrator-cli-service-spec`
737 """
738
739 MANAGED_CONFIG_OPTIONS = ServiceSpec.MANAGED_CONFIG_OPTIONS + [
740 'rgw_zone',
741 'rgw_realm',
742 'rgw_frontends',
743 ]
744
745 def __init__(self,
746 service_type: str = 'rgw',
747 service_id: Optional[str] = None,
748 placement: Optional[PlacementSpec] = None,
749 rgw_realm: Optional[str] = None,
750 rgw_zone: Optional[str] = None,
751 rgw_frontend_port: Optional[int] = None,
752 rgw_frontend_ssl_certificate: Optional[List[str]] = None,
753 rgw_frontend_type: Optional[str] = None,
754 unmanaged: bool = False,
755 ssl: bool = False,
756 preview_only: bool = False,
757 config: Optional[Dict[str, str]] = None,
758 networks: Optional[List[str]] = None,
759 subcluster: Optional[str] = None, # legacy, only for from_json on upgrade
760 ):
761 assert service_type == 'rgw', service_type
762
763 # for backward compatibility with octopus spec files,
764 if not service_id and (rgw_realm and rgw_zone):
765 service_id = rgw_realm + '.' + rgw_zone
766
767 super(RGWSpec, self).__init__(
768 'rgw', service_id=service_id,
769 placement=placement, unmanaged=unmanaged,
770 preview_only=preview_only, config=config, networks=networks)
771
772 #: The RGW realm associated with this service. Needs to be manually created
773 self.rgw_realm: Optional[str] = rgw_realm
774 #: The RGW zone associated with this service. Needs to be manually created
775 self.rgw_zone: Optional[str] = rgw_zone
776 #: Port of the RGW daemons
777 self.rgw_frontend_port: Optional[int] = rgw_frontend_port
778 #: List of SSL certificates
779 self.rgw_frontend_ssl_certificate: Optional[List[str]] = rgw_frontend_ssl_certificate
780 #: civetweb or beast (default: beast). See :ref:`rgw_frontends`
781 self.rgw_frontend_type: Optional[str] = rgw_frontend_type
782 #: enable SSL
783 self.ssl = ssl
784
785 def get_port_start(self) -> List[int]:
786 return [self.get_port()]
787
788 def get_port(self) -> int:
789 if self.rgw_frontend_port:
790 return self.rgw_frontend_port
791 if self.ssl:
792 return 443
793 else:
794 return 80
795
796 def validate(self) -> None:
797 super(RGWSpec, self).validate()
798
799 if self.rgw_realm and not self.rgw_zone:
800 raise SpecValidationError(
801 'Cannot add RGW: Realm specified but no zone specified')
802 if self.rgw_zone and not self.rgw_realm:
803 raise SpecValidationError(
804 'Cannot add RGW: Zone specified but no realm specified')
805
806
807 yaml.add_representer(RGWSpec, ServiceSpec.yaml_representer)
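
# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# Default RGW frontend port selection: an explicit rgw_frontend_port wins, otherwise
# the port falls back to 443 with SSL enabled and 80 without. Realm/zone are made up.
def _example_rgw_default_ports() -> None:
    assert RGWSpec(rgw_realm='myrealm', rgw_zone='myzone', ssl=True).get_port() == 443
    assert RGWSpec(rgw_realm='myrealm', rgw_zone='myzone').get_port() == 80
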
808
809
810 class IscsiServiceSpec(ServiceSpec):
811 def __init__(self,
812 service_type: str = 'iscsi',
813 service_id: Optional[str] = None,
814 pool: Optional[str] = None,
815 trusted_ip_list: Optional[str] = None,
816 api_port: Optional[int] = None,
817 api_user: Optional[str] = None,
818 api_password: Optional[str] = None,
819 api_secure: Optional[bool] = None,
820 ssl_cert: Optional[str] = None,
821 ssl_key: Optional[str] = None,
822 placement: Optional[PlacementSpec] = None,
823 unmanaged: bool = False,
824 preview_only: bool = False,
825 config: Optional[Dict[str, str]] = None,
826 networks: Optional[List[str]] = None,
827 ):
828 assert service_type == 'iscsi'
829 super(IscsiServiceSpec, self).__init__('iscsi', service_id=service_id,
830 placement=placement, unmanaged=unmanaged,
831 preview_only=preview_only,
832 config=config, networks=networks)
833
834 #: RADOS pool where ceph-iscsi config data is stored.
835 self.pool = pool
836 #: list of trusted IP addresses
837 self.trusted_ip_list = trusted_ip_list
838 #: ``api_port`` as defined in the ``iscsi-gateway.cfg``
839 self.api_port = api_port
840 #: ``api_user`` as defined in the ``iscsi-gateway.cfg``
841 self.api_user = api_user
842 #: ``api_password`` as defined in the ``iscsi-gateway.cfg``
843 self.api_password = api_password
844 #: ``api_secure`` as defined in the ``iscsi-gateway.cfg``
845 self.api_secure = api_secure
846 #: SSL certificate
847 self.ssl_cert = ssl_cert
848 #: SSL private key
849 self.ssl_key = ssl_key
850
851 if not self.api_secure and self.ssl_cert and self.ssl_key:
852 self.api_secure = True
853
854 def validate(self) -> None:
855 super(IscsiServiceSpec, self).validate()
856
857 if not self.pool:
858 raise SpecValidationError(
859 'Cannot add ISCSI: No Pool specified')
860
861 # Do not need to check for api_user and api_password as they
862 # now default to 'admin' when setting up the gateway url. Older
863 # iSCSI specs from before this change should be fine as they will
864 # have been required to have an api_user and api_password set and
865 # will be unaffected by the new default value.
866
867
868 yaml.add_representer(IscsiServiceSpec, ServiceSpec.yaml_representer)
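
# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# A minimal iscsi service spec: the pool is required by validate(); trusted IPs and
# API credentials below are made-up example values.
def _example_iscsi_spec() -> IscsiServiceSpec:
    spec = IscsiServiceSpec(
        service_id='igw',
        pool='iscsi-config',
        trusted_ip_list='192.168.0.10,192.168.0.11',
        api_user='admin',
        api_password='admin',
    )
    spec.validate()
    return spec
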
869
870
871 class AlertManagerSpec(ServiceSpec):
872 def __init__(self,
873 service_type: str = 'alertmanager',
874 service_id: Optional[str] = None,
875 placement: Optional[PlacementSpec] = None,
876 unmanaged: bool = False,
877 preview_only: bool = False,
878 user_data: Optional[Dict[str, Any]] = None,
879 config: Optional[Dict[str, str]] = None,
880 networks: Optional[List[str]] = None,
881 port: Optional[int] = None,
882 ):
883 assert service_type == 'alertmanager'
884 super(AlertManagerSpec, self).__init__(
885 'alertmanager', service_id=service_id,
886 placement=placement, unmanaged=unmanaged,
887 preview_only=preview_only, config=config, networks=networks)
888
889 # Custom configuration.
890 #
891 # Example:
892 # service_type: alertmanager
893 # service_id: xyz
894 # user_data:
895 # default_webhook_urls:
896 # - "https://foo"
897 # - "https://bar"
898 #
899 # Documentation:
900 # default_webhook_urls - A list of additional URLs that are
901 # added to the default receivers'
902 # <webhook_configs> configuration.
903 self.user_data = user_data or {}
904 self.port = port
905
906 def get_port_start(self) -> List[int]:
907 return [self.get_port(), 9094]
908
909 def get_port(self) -> int:
910 if self.port:
911 return self.port
912 else:
913 return 9093
914
915 def validate(self) -> None:
916 super(AlertManagerSpec, self).validate()
917
918 if self.port == 9094:
919 raise SpecValidationError(
920 'Port 9094 is reserved for AlertManager cluster listen address')
921
922
923 yaml.add_representer(AlertManagerSpec, ServiceSpec.yaml_representer)
924
925
926 class IngressSpec(ServiceSpec):
927 def __init__(self,
928 service_type: str = 'ingress',
929 service_id: Optional[str] = None,
930 config: Optional[Dict[str, str]] = None,
931 networks: Optional[List[str]] = None,
932 placement: Optional[PlacementSpec] = None,
933 backend_service: Optional[str] = None,
934 frontend_port: Optional[int] = None,
935 ssl_cert: Optional[str] = None,
936 ssl_key: Optional[str] = None,
937 ssl_dh_param: Optional[str] = None,
938 ssl_ciphers: Optional[List[str]] = None,
939 ssl_options: Optional[List[str]] = None,
940 monitor_port: Optional[int] = None,
941 monitor_user: Optional[str] = None,
942 monitor_password: Optional[str] = None,
943 enable_stats: Optional[bool] = None,
944 keepalived_password: Optional[str] = None,
945 virtual_ip: Optional[str] = None,
946 virtual_interface_networks: Optional[List[str]] = [],
947 unmanaged: bool = False,
948 ssl: bool = False
949 ):
950 assert service_type == 'ingress'
951 super(IngressSpec, self).__init__(
952 'ingress', service_id=service_id,
953 placement=placement, config=config,
954 networks=networks
955 )
956 self.backend_service = backend_service
957 self.frontend_port = frontend_port
958 self.ssl_cert = ssl_cert
959 self.ssl_key = ssl_key
960 self.ssl_dh_param = ssl_dh_param
961 self.ssl_ciphers = ssl_ciphers
962 self.ssl_options = ssl_options
963 self.monitor_port = monitor_port
964 self.monitor_user = monitor_user
965 self.monitor_password = monitor_password
966 self.keepalived_password = keepalived_password
967 self.virtual_ip = virtual_ip
968 self.virtual_interface_networks = virtual_interface_networks or []
969 self.unmanaged = unmanaged
970 self.ssl = ssl
971
972 def get_port_start(self) -> List[int]:
973 return [cast(int, self.frontend_port),
974 cast(int, self.monitor_port)]
975
976 def get_virtual_ip(self) -> Optional[str]:
977 return self.virtual_ip
978
979 def validate(self) -> None:
980 super(IngressSpec, self).validate()
981
982 if not self.backend_service:
983 raise SpecValidationError(
984 'Cannot add ingress: No backend_service specified')
985 if not self.frontend_port:
986 raise SpecValidationError(
987 'Cannot add ingress: No frontend_port specified')
988 if not self.monitor_port:
989 raise SpecValidationError(
990 'Cannot add ingress: No monitor_port specified')
991 if not self.virtual_ip:
992 raise SpecValidationError(
993 'Cannot add ingress: No virtual_ip provided')
994
995
996 yaml.add_representer(IngressSpec, ServiceSpec.yaml_representer)
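
# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# A minimal ingress spec: validate() requires backend_service, frontend_port,
# monitor_port and virtual_ip. All values below are made-up examples.
def _example_ingress_spec() -> IngressSpec:
    spec = IngressSpec(
        service_id='rgw.foo',
        backend_service='rgw.foo',
        frontend_port=8080,
        monitor_port=1967,
        virtual_ip='10.0.0.100/24',
    )
    spec.validate()
    return spec
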
997
998
999 class CustomContainerSpec(ServiceSpec):
1000 def __init__(self,
1001 service_type: str = 'container',
1002 service_id: Optional[str] = None,
1003 config: Optional[Dict[str, str]] = None,
1004 networks: Optional[List[str]] = None,
1005 placement: Optional[PlacementSpec] = None,
1006 unmanaged: bool = False,
1007 preview_only: bool = False,
1008 image: Optional[str] = None,
1009 entrypoint: Optional[str] = None,
1010 uid: Optional[int] = None,
1011 gid: Optional[int] = None,
1012 volume_mounts: Optional[Dict[str, str]] = {},
1013 args: Optional[List[str]] = [],
1014 envs: Optional[List[str]] = [],
1015 privileged: Optional[bool] = False,
1016 bind_mounts: Optional[List[List[str]]] = None,
1017 ports: Optional[List[int]] = [],
1018 dirs: Optional[List[str]] = [],
1019 files: Optional[Dict[str, Any]] = {},
1020 ):
1021 assert service_type == 'container'
1022 assert service_id is not None
1023 assert image is not None
1024
1025 super(CustomContainerSpec, self).__init__(
1026 service_type, service_id,
1027 placement=placement, unmanaged=unmanaged,
1028 preview_only=preview_only, config=config,
1029 networks=networks)
1030
1031 self.image = image
1032 self.entrypoint = entrypoint
1033 self.uid = uid
1034 self.gid = gid
1035 self.volume_mounts = volume_mounts
1036 self.args = args
1037 self.envs = envs
1038 self.privileged = privileged
1039 self.bind_mounts = bind_mounts
1040 self.ports = ports
1041 self.dirs = dirs
1042 self.files = files
1043
1044 def config_json(self) -> Dict[str, Any]:
1045 """
1046 Helper function to get the value of the `--config-json` cephadm
1047 command line option. It will contain all specification properties
1048 that do not have a `None` value; properties left at `None` will get
1049 default values in cephadm.
1050 :return: Returns a dictionary containing all specification
1051 properties.
1052 """
1053 config_json = {}
1054 for prop in ['image', 'entrypoint', 'uid', 'gid', 'args',
1055 'envs', 'volume_mounts', 'privileged',
1056 'bind_mounts', 'ports', 'dirs', 'files']:
1057 value = getattr(self, prop)
1058 if value is not None:
1059 config_json[prop] = value
1060 return config_json
1061
1062
1063 yaml.add_representer(CustomContainerSpec, ServiceSpec.yaml_representer)
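
# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# config_json() copies every listed property that is not None, so unset optional
# fields (entrypoint, uid, gid, bind_mounts here) are omitted while explicitly set
# ones and the empty-list/dict defaults are kept. Image and ports are made up.
def _example_container_config_json() -> Dict[str, Any]:
    spec = CustomContainerSpec(
        service_id='foo',
        image='docker.io/library/busybox:latest',
        ports=[9000],
    )
    return spec.config_json()
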
1064
1065
1066 class MonitoringSpec(ServiceSpec):
1067 def __init__(self,
1068 service_type: str,
1069 service_id: Optional[str] = None,
1070 config: Optional[Dict[str, str]] = None,
1071 networks: Optional[List[str]] = None,
1072 placement: Optional[PlacementSpec] = None,
1073 unmanaged: bool = False,
1074 preview_only: bool = False,
1075 port: Optional[int] = None,
1076 ):
1077 assert service_type in ['grafana', 'node-exporter', 'prometheus']
1078
1079 super(MonitoringSpec, self).__init__(
1080 service_type, service_id,
1081 placement=placement, unmanaged=unmanaged,
1082 preview_only=preview_only, config=config,
1083 networks=networks)
1084
1085 self.service_type = service_type
1086 self.port = port
1087
1088 def get_port_start(self) -> List[int]:
1089 return [self.get_port()]
1090
1091 def get_port(self) -> int:
1092 if self.port:
1093 return self.port
1094 else:
1095 return {'prometheus': 9095,
1096 'node-exporter': 9100,
1097 'grafana': 3000}[self.service_type]