]>
Commit | Line | Data |
---|---|---|
9f95a23c | 1 | import fnmatch |
2a845540 | 2 | import os |
9f95a23c | 3 | import re |
20effc67 | 4 | import enum |
f67539c2 | 5 | from collections import OrderedDict |
20effc67 | 6 | from contextlib import contextmanager |
1911f103 | 7 | from functools import wraps |
f67539c2 TL |
8 | from ipaddress import ip_network, ip_address |
9 | from typing import Optional, Dict, Any, List, Union, Callable, Iterable, Type, TypeVar, cast, \ | |
20effc67 | 10 | NamedTuple, Mapping, Iterator |
9f95a23c | 11 | |
f6b5b4d7 TL |
12 | import yaml |
13 | ||
20effc67 TL |
14 | from ceph.deployment.hostspec import HostSpec, SpecValidationError, assert_valid_host |
15 | from ceph.deployment.utils import unwrap_ipv6, valid_addr | |
16 | from ceph.utils import is_hex | |
9f95a23c | 17 | |
ServiceSpecT = TypeVar('ServiceSpecT', bound='ServiceSpec')
FuncT = TypeVar('FuncT', bound=Callable)


def handle_type_error(method: FuncT) -> FuncT:
    """Decorator converting TypeError/AttributeError raised by *method*
    into a SpecValidationError prefixed with the class name.

    Applied to the various ``from_json`` classmethods so malformed input
    surfaces as a spec-validation problem rather than a raw Python error.
    """
    @wraps(method)
    def inner(cls: Any, *args: Any, **kwargs: Any) -> Any:
        try:
            return method(cls, *args, **kwargs)
        except (TypeError, AttributeError) as e:
            raise SpecValidationError(f'{cls.__name__}: {e}')
    return cast(FuncT, inner)
31 | ||
1911f103 | 32 | |
f67539c2 TL |
class HostPlacementSpec(NamedTuple):
    """An explicit placement on one host: hostname plus an optional
    network (IP, CIDR or addrvec) and an optional daemon name.

    Serializes to/from the compact string form ``host[:network][=name]``.
    """
    hostname: str
    network: str
    name: str

    def __str__(self) -> str:
        """Render the compact 'host[:network][=name]' form (inverse of parse())."""
        res = ''
        res += self.hostname
        if self.network:
            res += ':' + self.network
        if self.name:
            res += '=' + self.name
        return res

    @classmethod
    @handle_type_error
    def from_json(cls, data: Union[dict, str]) -> 'HostPlacementSpec':
        """Accept either the compact string form or a dict of the three fields."""
        if isinstance(data, str):
            return cls.parse(data)
        return cls(**data)

    def to_json(self) -> str:
        """Serialize as the compact string form."""
        return str(self)

    @classmethod
    def parse(cls, host, require_network=True):
        # type: (str, bool) -> HostPlacementSpec
        """
        Split host into host, network, and (optional) daemon name parts.  The network
        part can be an IP, CIDR, or ceph addrvec like '[v2:1.2.3.4:3300,v1:1.2.3.4:6789]'.
        e.g.,
          "myhost"
          "myhost=name"
          "myhost:1.2.3.4"
          "myhost:1.2.3.4=name"
          "myhost:1.2.3.0/24"
          "myhost:1.2.3.0/24=name"
          "myhost:[v2:1.2.3.4:3000]=name"
          "myhost:[v2:1.2.3.4:3000,v1:1.2.3.4:6789]=name"
        """
        # Matches from start to : or = or until end of string
        host_re = r'^(.*?)(:|=|$)'
        # Matches from : to = or until end of string
        ip_re = r':(.*?)(=|$)'
        # Matches from = to end of string
        name_re = r'=(.*?)$'

        # assign defaults
        host_spec = cls('', '', '')

        match_host = re.search(host_re, host)
        if match_host:
            host_spec = host_spec._replace(hostname=match_host.group(1))

        name_match = re.search(name_re, host)
        if name_match:
            host_spec = host_spec._replace(name=name_match.group(1))

        ip_match = re.search(ip_re, host)
        if ip_match:
            host_spec = host_spec._replace(network=ip_match.group(1))

        if not require_network:
            # NOTE: this early return also skips the hostname validate() below
            return host_spec

        networks = list()  # type: List[str]
        network = host_spec.network
        # in case we have [v2:1.2.3.4:3000,v1:1.2.3.4:6478]
        if ',' in network:
            networks = [x for x in network.split(',')]
        else:
            if network != '':
                networks.append(network)

        for network in networks:
            # only if we have versioned network configs
            if network.startswith('v') or network.startswith('[v'):
                # if this is ipv6 we can't just simply split on ':' so do
                # a split once and rsplit once to leave us with just ipv6 addr
                network = network.split(':', 1)[1]
                network = network.rsplit(':', 1)[0]
            try:
                # if subnets are defined, also verify the validity
                if '/' in network:
                    ip_network(network)
                else:
                    ip_address(unwrap_ipv6(network))
            except ValueError as e:
                # logging?
                raise e
        host_spec.validate()
        return host_spec

    def validate(self) -> None:
        """Check the hostname via assert_valid_host(); raises on failure."""
        assert_valid_host(self.hostname)
128 | ||
129 | ||
class PlacementSpec(object):
    """
    For APIs that need to specify a host subset.

    A placement may be expressed as explicit hosts, a host label, an
    fnmatch host pattern, and/or a daemon count (total or per-host).
    """

    def __init__(self,
                 label=None,  # type: Optional[str]
                 hosts=None,  # type: Union[List[str],List[HostPlacementSpec], None]
                 count=None,  # type: Optional[int]
                 count_per_host=None,  # type: Optional[int]
                 host_pattern=None,  # type: Optional[str]
                 ):
        # type: (...) -> None
        self.label = label
        self.hosts = []  # type: List[HostPlacementSpec]

        if hosts:
            self.set_hosts(hosts)

        self.count = count  # type: Optional[int]
        self.count_per_host = count_per_host  # type: Optional[int]

        #: fnmatch patterns to select hosts. Can also be a single host.
        self.host_pattern = host_pattern  # type: Optional[str]

        self.validate()

    def is_empty(self) -> bool:
        """Return True when no placement constraint of any kind is set."""
        return (
            self.label is None
            and not self.hosts
            and not self.host_pattern
            and self.count is None
            and self.count_per_host is None
        )

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, PlacementSpec):
            return self.label == other.label \
                   and self.hosts == other.hosts \
                   and self.count == other.count \
                   and self.host_pattern == other.host_pattern \
                   and self.count_per_host == other.count_per_host
        return NotImplemented

    def set_hosts(self, hosts: Union[List[str], List[HostPlacementSpec]]) -> None:
        """Set .hosts from either HostPlacementSpec objects or parseable strings."""
        # To backpopulate the .hosts attribute when using labels or count
        # in the orchestrator backend.
        if all([isinstance(host, HostPlacementSpec) for host in hosts]):
            self.hosts = hosts  # type: ignore
        else:
            self.hosts = [HostPlacementSpec.parse(x, require_network=False)  # type: ignore
                          for x in hosts if x]

    # deprecated
    def filter_matching_hosts(self, _get_hosts_func: Callable) -> List[str]:
        return self.filter_matching_hostspecs(_get_hosts_func(as_hostspec=True))

    def filter_matching_hostspecs(self, hostspecs: Iterable[HostSpec]) -> List[str]:
        """Return the hostnames from *hostspecs* selected by this placement.

        Precedence: explicit hosts, then label, then host_pattern; with no
        constraint at all, every host matches.
        """
        if self.hosts:
            # use a set for O(1) membership instead of scanning a list per host
            known_hostnames = {hs.hostname for hs in hostspecs}
            return [h.hostname for h in self.hosts if h.hostname in known_hostnames]
        if self.label:
            return [hs.hostname for hs in hostspecs if self.label in hs.labels]
        all_hosts = [hs.hostname for hs in hostspecs]
        if self.host_pattern:
            return fnmatch.filter(all_hosts, self.host_pattern)
        return all_hosts

    def get_target_count(self, hostspecs: Iterable[HostSpec]) -> int:
        """Return the number of daemons this placement implies for *hostspecs*."""
        if self.count:
            return self.count
        return len(self.filter_matching_hostspecs(hostspecs)) * (self.count_per_host or 1)

    def pretty_str(self) -> str:
        """
        >>> #doctest: +SKIP
        ... ps = PlacementSpec(...)  # For all placement specs:
        ... PlacementSpec.from_string(ps.pretty_str()) == ps
        """
        kv = []
        if self.hosts:
            kv.append(';'.join([str(h) for h in self.hosts]))
        if self.count:
            kv.append('count:%d' % self.count)
        if self.count_per_host:
            kv.append('count-per-host:%d' % self.count_per_host)
        if self.label:
            kv.append('label:%s' % self.label)
        if self.host_pattern:
            kv.append(self.host_pattern)
        return ';'.join(kv)

    def __repr__(self) -> str:
        kv = []
        if self.count:
            kv.append('count=%d' % self.count)
        if self.count_per_host:
            kv.append('count_per_host=%d' % self.count_per_host)
        if self.label:
            kv.append('label=%s' % repr(self.label))
        if self.hosts:
            kv.append('hosts={!r}'.format(self.hosts))
        if self.host_pattern:
            kv.append('host_pattern={!r}'.format(self.host_pattern))
        return "PlacementSpec(%s)" % ', '.join(kv)

    @classmethod
    @handle_type_error
    def from_json(cls, data: dict) -> 'PlacementSpec':
        """Build a PlacementSpec from a json dict; 'hosts' entries may be
        compact strings or HostPlacementSpec dicts."""
        c = data.copy()
        hosts = c.get('hosts', [])
        if hosts:
            c['hosts'] = []
            for host in hosts:
                c['hosts'].append(HostPlacementSpec.from_json(host))
        _cls = cls(**c)
        _cls.validate()
        return _cls

    def to_json(self) -> dict:
        """Return a json-serializable dict, omitting unset fields."""
        r: Dict[str, Any] = {}
        if self.label:
            r['label'] = self.label
        if self.hosts:
            r['hosts'] = [host.to_json() for host in self.hosts]
        if self.count:
            r['count'] = self.count
        if self.count_per_host:
            r['count_per_host'] = self.count_per_host
        if self.host_pattern:
            r['host_pattern'] = self.host_pattern
        return r

    def validate(self) -> None:
        """Raise SpecValidationError for any inconsistent field combination."""
        if self.hosts and self.label:
            # TODO: a less generic Exception
            raise SpecValidationError('Host and label are mutually exclusive')
        if self.count is not None:
            try:
                intval = int(self.count)
            except (ValueError, TypeError):
                raise SpecValidationError("num/count must be a numeric value")
            if self.count != intval:
                raise SpecValidationError("num/count must be an integer value")
            if self.count < 1:
                raise SpecValidationError("num/count must be >= 1")
        if self.count_per_host is not None:
            try:
                intval = int(self.count_per_host)
            except (ValueError, TypeError):
                raise SpecValidationError("count-per-host must be a numeric value")
            if self.count_per_host != intval:
                raise SpecValidationError("count-per-host must be an integer value")
            if self.count_per_host < 1:
                raise SpecValidationError("count-per-host must be >= 1")
        if self.count_per_host is not None and not (
                self.label
                or self.hosts
                or self.host_pattern
        ):
            raise SpecValidationError(
                "count-per-host must be combined with label or hosts or host_pattern"
            )
        if self.count is not None and self.count_per_host is not None:
            raise SpecValidationError("cannot combine count and count-per-host")
        if (
                self.count_per_host is not None
                and self.hosts
                and any([hs.network or hs.name for hs in self.hosts])
        ):
            # message grammar fixed (was "combined explicit placement with")
            raise SpecValidationError(
                "count-per-host cannot be combined with explicit placement with names or networks"
            )
        if self.host_pattern:
            if not isinstance(self.host_pattern, str):
                raise SpecValidationError('host_pattern must be of type string')
            if self.hosts:
                raise SpecValidationError('cannot combine host patterns and hosts')

        for h in self.hosts:
            h.validate()

    @classmethod
    def from_string(cls, arg):
        # type: (Optional[str]) -> PlacementSpec
        """
        A single integer is parsed as a count:

        >>> PlacementSpec.from_string('3')
        PlacementSpec(count=3)

        A list of names is parsed as host specifications:

        >>> PlacementSpec.from_string('host1 host2')
        PlacementSpec(hosts=[HostPlacementSpec(hostname='host1', network='', name=''), HostPlacemen\
tSpec(hostname='host2', network='', name='')])

        You can also prefix the hosts with a count as follows:

        >>> PlacementSpec.from_string('2 host1 host2')
        PlacementSpec(count=2, hosts=[HostPlacementSpec(hostname='host1', network='', name=''), Hos\
tPlacementSpec(hostname='host2', network='', name='')])

        You can specify labels using `label:<label>`

        >>> PlacementSpec.from_string('label:mon')
        PlacementSpec(label='mon')

        Labels also support a count:

        >>> PlacementSpec.from_string('3 label:mon')
        PlacementSpec(count=3, label='mon')

        fnmatch is also supported:

        >>> PlacementSpec.from_string('data[1-3]')
        PlacementSpec(host_pattern='data[1-3]')

        >>> PlacementSpec.from_string(None)
        PlacementSpec()
        """
        if not arg:
            # None or empty string: no constraints at all
            strings = []
        elif isinstance(arg, str):
            if ' ' in arg:
                strings = arg.split(' ')
            elif ';' in arg:
                strings = arg.split(';')
            elif ',' in arg and '[' not in arg:
                # FIXME: this isn't quite right. we want to avoid breaking
                # a list of mons with addrvecs... so we're basically allowing
                # , most of the time, except when addrvecs are used. maybe
                # ok?
                strings = arg.split(',')
            else:
                strings = [arg]
        else:
            raise SpecValidationError('invalid placement %s' % arg)

        count = None
        count_per_host = None
        if strings:
            try:
                count = int(strings[0])
                strings = strings[1:]
            except ValueError:
                pass
        for s in strings:
            if s.startswith('count:'):
                try:
                    count = int(s[len('count:'):])
                    strings.remove(s)
                    break
                except ValueError:
                    pass
        for s in strings:
            if s.startswith('count-per-host:'):
                try:
                    count_per_host = int(s[len('count-per-host:'):])
                    strings.remove(s)
                    break
                except ValueError:
                    pass

        advanced_hostspecs = [h for h in strings if
                              (':' in h or '=' in h or not any(c in '[]?*:=' for c in h)) and
                              'label:' not in h]
        for a_h in advanced_hostspecs:
            strings.remove(a_h)

        labels = [x for x in strings if 'label:' in x]
        if len(labels) > 1:
            raise SpecValidationError('more than one label provided: {}'.format(labels))
        for lbl in labels:
            strings.remove(lbl)
        label = labels[0][6:] if labels else None

        host_patterns = strings
        if len(host_patterns) > 1:
            raise SpecValidationError(
                'more than one host pattern provided: {}'.format(host_patterns))

        ps = PlacementSpec(count=count,
                           count_per_host=count_per_host,
                           hosts=advanced_hostspecs,
                           label=label,
                           host_pattern=host_patterns[0] if host_patterns else None)
        return ps
419 | ||
420 | ||
20effc67 TL |
# Global toggle flipped by service_spec_allow_invalid_from_json() so that
# spec loading can temporarily skip validation (the reading site is in
# ServiceSpec's json machinery).
_service_spec_from_json_validate = True
422 | ||
423 | ||
2a845540 TL |
class CustomConfig:
    """
    Class to specify custom config files to be mounted in daemon's container.

    Holds the literal file ``content`` and the container-internal
    ``mount_path`` at which the file should appear.
    """

    _fields = ['content', 'mount_path']

    def __init__(self, content: str, mount_path: str) -> None:
        self.content: str = content          # raw file contents
        self.mount_path: str = mount_path    # destination path inside the container
        self.validate()

    def to_json(self) -> Dict[str, Any]:
        """Return a json-serializable dict representation."""
        return {
            'content': self.content,
            'mount_path': self.mount_path,
        }

    @classmethod
    def from_json(cls, data: Dict[str, Any]) -> "CustomConfig":
        """Build a CustomConfig from a dict, rejecting missing or unknown keys."""
        for k in cls._fields:
            if k not in data:
                raise SpecValidationError(f'CustomConfig must have "{k}" field')
        for k in data.keys():
            if k not in cls._fields:
                raise SpecValidationError(f'CustomConfig got unknown field "{k}"')
        return cls(**data)

    @property
    def filename(self) -> str:
        """Basename of the mount path, e.g. '/etc/foo/bar.conf' -> 'bar.conf'."""
        return os.path.basename(self.mount_path)

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, CustomConfig):
            return (
                self.content == other.content
                and self.mount_path == other.mount_path
            )
        return NotImplemented

    def __repr__(self) -> str:
        return f'CustomConfig({self.mount_path})'

    def validate(self) -> None:
        """Raise SpecValidationError unless both fields are strings."""
        if not isinstance(self.content, str):
            raise SpecValidationError(
                f'CustomConfig content must be a string. Got {type(self.content)}')
        if not isinstance(self.mount_path, str):
            # bug fix: this message previously (and incorrectly) said "content"
            raise SpecValidationError(
                f'CustomConfig mount_path must be a string. Got {type(self.mount_path)}')
475 | ||
20effc67 TL |
@contextmanager
def service_spec_allow_invalid_from_json() -> Iterator[None]:
    """
    Temporarily disable validation when loading specs from json.

    I know this is evil, but unfortunately `ceph orch ls`
    may return invalid OSD specs for OSDs not associated to
    any specs. If you have a better idea, please!
    """
    global _service_spec_from_json_validate
    _service_spec_from_json_validate = False
    try:
        yield
    finally:
        # bug fix: restore validation even if the with-body raised;
        # previously an exception left validation disabled for good
        _service_spec_from_json_validate = True
487 | ||
488 | ||
aee94f69 TL |
class ArgumentSpec:
    """Represents a single argument that can be passed to an underlying
    subsystem, such as a container engine or another command-line tool.

    Stays backwards compatible with the earlier form of argument: a
    plain string, which was always assumed to be split on spaces
    (`--cpus 8` becomes `["--cpus", "8"]`). An ArgumentSpec may be
    converted from either a string or a json/yaml object; the object
    form lets you choose whether the string is split, so a value with
    spaces such as `--migrate-from=//192.168.5.22/My Documents` can be
    expressed.
    """
    _fields = ['argument', 'split']

    class OriginalType(enum.Enum):
        # remembers which input form produced this spec
        OBJECT = 0
        STRING = 1

    def __init__(
        self,
        argument: str,
        split: bool = False,
        *,
        origin: OriginalType = OriginalType.OBJECT,
    ) -> None:
        self.argument = argument
        self.split = bool(split)
        # origin helps with round-tripping between inputs that
        # are simple strings or objects (dicts)
        self._origin = origin
        self.validate()

    def to_json(self) -> Union[str, Dict[str, Any]]:
        """Return a json-safe representation of the ArgumentSpec."""
        if self._origin == self.OriginalType.STRING:
            return self.argument
        return {'argument': self.argument, 'split': self.split}

    def to_args(self) -> List[str]:
        """Convert this ArgumentSpec into a list of arguments suitable for
        adding to an argv-style command line.
        """
        if self.split:
            return [piece for piece in self.argument.split(" ") if piece]
        return [self.argument]

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, ArgumentSpec):
            return (self.argument == other.argument
                    and self.split == other.split)
        # This is a workaround for silly ceph mgr object/type identity
        # mismatches due to multiple python interpreters in use.
        try:
            their_argument = getattr(other, 'argument')
            their_split = getattr(other, 'split')
        except AttributeError:
            return NotImplemented
        return (self.argument == their_argument and self.split == their_split)

    def __repr__(self) -> str:
        return f'ArgumentSpec({self.argument!r}, {self.split!r})'

    def validate(self) -> None:
        """Raise SpecValidationError unless both fields have the right types."""
        if not isinstance(self.argument, str):
            raise SpecValidationError(
                f'ArgumentSpec argument must be a string. Got {type(self.argument)}')
        if not isinstance(self.split, bool):
            raise SpecValidationError(
                f'ArgumentSpec split must be a boolean. Got {type(self.split)}')

    @classmethod
    def from_json(cls, data: Union[str, Dict[str, Any]]) -> "ArgumentSpec":
        """Convert a json-object (dict) to an ArgumentSpec."""
        if isinstance(data, str):
            # plain strings keep the historical always-split behavior
            return cls(data, split=True, origin=cls.OriginalType.STRING)
        if 'argument' not in data:
            raise SpecValidationError(f'ArgumentSpec must have an "argument" field')
        for key in data:
            if key not in cls._fields:
                raise SpecValidationError(f'ArgumentSpec got an unknown field {key!r}')
        return cls(**data)

    @staticmethod
    def map_json(
        values: Optional["ArgumentList"]
    ) -> Optional[List[Union[str, Dict[str, Any]]]]:
        """Given a list of ArgumentSpec objects return a json-safe
        representation of them."""
        if values is None:
            return None
        return [value.to_json() for value in values]

    @classmethod
    def from_general_args(cls, data: "GeneralArgList") -> "ArgumentList":
        """Convert a list of strs, dicts, or existing ArgumentSpec objects
        to a list of only ArgumentSpec objects.
        """
        result: ArgumentList = []
        for item in data:
            if isinstance(item, (str, dict)):
                result.append(cls.from_json(item))
            elif isinstance(item, cls):
                result.append(item)
            elif hasattr(item, 'to_json'):
                # Workaround for ceph mgr object/type identity mismatches
                # caused by multiple python interpreters in use. Safe because
                # these objects already round-trip through json/yaml.
                result.append(cls.from_json(item.to_json()))
            else:
                raise SpecValidationError(f"Unknown type for argument: {type(item)}")
        return result
609 | ||
610 | ||
# A fully-normalized argument list: only ArgumentSpec objects.
ArgumentList = List[ArgumentSpec]
# The loosely-typed input form accepted from json/yaml: raw strings,
# dicts, or already-constructed ArgumentSpec objects.
GeneralArgList = List[Union[str, Dict[str, Any], "ArgumentSpec"]]
613 | ||
614 | ||
9f95a23c TL |
615 | class ServiceSpec(object): |
616 | """ | |
617 | Details of service creation. | |
618 | ||
619 | Request to the orchestrator for a cluster of daemons | |
aee94f69 | 620 | such as MDS, RGW, iscsi gateway, nvmeof gateway, MONs, MGRs, Prometheus |
9f95a23c TL |
621 | |
622 | This structure is supposed to be enough information to | |
623 | start the services. | |
9f95a23c | 624 | """ |
aee94f69 | 625 | KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi nvmeof loki promtail mds mgr mon nfs ' \ |
1e59de90 TL |
626 | 'node-exporter osd prometheus rbd-mirror rgw agent ceph-exporter ' \ |
627 | 'container ingress cephfs-mirror snmp-gateway jaeger-tracing ' \ | |
628 | 'elasticsearch jaeger-agent jaeger-collector jaeger-query'.split() | |
aee94f69 | 629 | REQUIRES_SERVICE_ID = 'iscsi nvmeof mds nfs rgw container ingress '.split() |
f67539c2 TL |
630 | MANAGED_CONFIG_OPTIONS = [ |
631 | 'mds_join_fs', | |
632 | ] | |
9f95a23c | 633 | |
1911f103 | 634 | @classmethod |
f67539c2 | 635 | def _cls(cls: Type[ServiceSpecT], service_type: str) -> Type[ServiceSpecT]: |
1911f103 TL |
636 | from ceph.deployment.drive_group import DriveGroupSpec |
637 | ||
638 | ret = { | |
1e59de90 | 639 | 'mon': MONSpec, |
1911f103 TL |
640 | 'rgw': RGWSpec, |
641 | 'nfs': NFSServiceSpec, | |
642 | 'osd': DriveGroupSpec, | |
33c7a0ef | 643 | 'mds': MDSSpec, |
1911f103 | 644 | 'iscsi': IscsiServiceSpec, |
aee94f69 | 645 | 'nvmeof': NvmeofServiceSpec, |
f91f0fd5 | 646 | 'alertmanager': AlertManagerSpec, |
f67539c2 | 647 | 'ingress': IngressSpec, |
f91f0fd5 | 648 | 'container': CustomContainerSpec, |
20effc67 | 649 | 'grafana': GrafanaSpec, |
b3b6e05e | 650 | 'node-exporter': MonitoringSpec, |
39ae355f TL |
651 | 'ceph-exporter': CephExporterSpec, |
652 | 'prometheus': PrometheusSpec, | |
33c7a0ef TL |
653 | 'loki': MonitoringSpec, |
654 | 'promtail': MonitoringSpec, | |
20effc67 | 655 | 'snmp-gateway': SNMPGatewaySpec, |
1e59de90 TL |
656 | 'elasticsearch': TracingSpec, |
657 | 'jaeger-agent': TracingSpec, | |
658 | 'jaeger-collector': TracingSpec, | |
659 | 'jaeger-query': TracingSpec, | |
660 | 'jaeger-tracing': TracingSpec, | |
1911f103 TL |
661 | }.get(service_type, cls) |
662 | if ret == ServiceSpec and not service_type: | |
f67539c2 | 663 | raise SpecValidationError('Spec needs a "service_type" key.') |
1911f103 TL |
664 | return ret |
665 | ||
f67539c2 | 666 | def __new__(cls: Type[ServiceSpecT], *args: Any, **kwargs: Any) -> ServiceSpecT: |
1911f103 TL |
667 | """ |
668 | Some Python foo to make sure, we don't have an object | |
669 | like `ServiceSpec('rgw')` of type `ServiceSpec`. Now we have: | |
670 | ||
671 | >>> type(ServiceSpec('rgw')) == type(RGWSpec('rgw')) | |
672 | True | |
673 | ||
674 | """ | |
675 | if cls != ServiceSpec: | |
676 | return object.__new__(cls) | |
677 | service_type = kwargs.get('service_type', args[0] if args else None) | |
f67539c2 | 678 | sub_cls: Any = cls._cls(service_type) |
1911f103 TL |
679 | return object.__new__(sub_cls) |
680 | ||
    def __init__(self,
                 service_type: str,
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 count: Optional[int] = None,
                 config: Optional[Dict[str, str]] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 networks: Optional[List[str]] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        """Build a service spec.

        NOTE(review): the ``count`` parameter is accepted but never stored
        here; the daemon count lives on ``placement.count`` — confirm how
        callers use it.
        """

        #: See :ref:`orchestrator-cli-placement-spec`.
        self.placement = PlacementSpec() if placement is None else placement  # type: PlacementSpec

        assert service_type in ServiceSpec.KNOWN_SERVICE_TYPES, service_type
        #: The type of the service. Needs to be either a Ceph
        #: service (``mon``, ``crash``, ``mds``, ``mgr``, ``osd`` or
        #: ``rbd-mirror``), a gateway (``nfs`` or ``rgw``), part of the
        #: monitoring stack (``alertmanager``, ``grafana``, ``node-exporter`` or
        #: ``prometheus``) or (``container``) for custom containers.
        self.service_type = service_type

        #: The name of the service. Required for ``iscsi``, ``nvmeof``, ``mds``, ``nfs``, ``osd``,
        #: ``rgw``, ``container``, ``ingress``
        self.service_id = None

        if self.service_type in self.REQUIRES_SERVICE_ID or self.service_type == 'osd':
            self.service_id = service_id

        #: If set to ``true``, the orchestrator will not deploy nor remove
        #: any daemon associated with this service. Placement and all other properties
        #: will be ignored. This is useful, if you do not want this service to be
        #: managed temporarily. For cephadm, See :ref:`cephadm-spec-unmanaged`
        self.unmanaged = unmanaged
        self.preview_only = preview_only

        #: A list of network identities instructing the daemons to only bind
        #: on the particular networks in that list. In case the cluster is distributed
        #: across multiple networks, you can add multiple networks. See
        #: :ref:`cephadm-monitoring-networks-ports`,
        #: :ref:`cephadm-rgw-networks` and :ref:`cephadm-mgr-networks`.
        self.networks: List[str] = networks or []

        self.config: Optional[Dict[str, str]] = None
        if config:
            # normalize space-separated config keys to underscore form
            self.config = {k.replace(' ', '_'): v for k, v in config.items()}

        # extra args are normalized from strings/dicts into ArgumentSpec lists
        self.extra_container_args: Optional[ArgumentList] = None
        self.extra_entrypoint_args: Optional[ArgumentList] = None
        if extra_container_args:
            self.extra_container_args = ArgumentSpec.from_general_args(
                extra_container_args)
        if extra_entrypoint_args:
            self.extra_entrypoint_args = ArgumentSpec.from_general_args(
                extra_entrypoint_args)
        self.custom_configs: Optional[List[CustomConfig]] = custom_configs
20effc67 | 740 | |
    @classmethod
    @handle_type_error
    def from_json(cls: Type[ServiceSpecT], json_spec: Dict) -> ServiceSpecT:
        """
        Initialize 'ServiceSpec' object data from a json structure

        There are two valid styles for service specs:

        the "old" style:

        .. code:: yaml

            service_type: nfs
            service_id: foo
            pool: mypool
            namespace: myns

        and the "new" style:

        .. code:: yaml

            service_type: nfs
            service_id: foo
            config:
              some_option: the_value
            networks: [10.10.0.0/16]
            spec:
              pool: mypool
              namespace: myns

        In https://tracker.ceph.com/issues/45321 we decided that we'd like to
        prefer the new style as it is more readable and provides a better
        understanding of what fields are special for a given service type.

        Note, we'll need to stay compatible with both versions for
        the next two major releases (octopus, pacific).

        :param json_spec: A valid dict with ServiceSpec

        :meta private:
        """
        if not isinstance(json_spec, dict):
            raise SpecValidationError(
                f'Service Spec is not an (JSON or YAML) object. got "{str(json_spec)}"')

        json_spec = cls.normalize_json(json_spec)

        c = json_spec.copy()

        # kludge to make `from_json` compatible to `Orchestrator.describe_service`
        # Open question: Remove `service_id` form to_json?
        if c.get('service_name', ''):
            # 'service_name' is '<type>.<id>'; split it apart and backfill
            # any missing 'service_type'/'service_id' keys from it
            service_type_id = c['service_name'].split('.', 1)

            if not c.get('service_type', ''):
                c['service_type'] = service_type_id[0]
            if not c.get('service_id', '') and len(service_type_id) > 1:
                c['service_id'] = service_type_id[1]
            del c['service_name']

        service_type = c.get('service_type', '')
        # dispatch to the concrete spec class for this service type
        _cls = cls._cls(service_type)

        if 'status' in c:
            del c['status']  # kludge to make us compatible to `ServiceDescription.to_json()`

        return _cls._from_json_impl(c)  # type: ignore
9f95a23c | 808 | |
f67539c2 TL |
809 | @staticmethod |
810 | def normalize_json(json_spec: dict) -> dict: | |
811 | networks = json_spec.get('networks') | |
812 | if networks is None: | |
813 | return json_spec | |
814 | if isinstance(networks, list): | |
815 | return json_spec | |
816 | if not isinstance(networks, str): | |
817 | raise SpecValidationError(f'Networks ({networks}) must be a string or list of strings') | |
818 | json_spec['networks'] = [networks] | |
819 | return json_spec | |
820 | ||
9f95a23c | 821 | @classmethod |
f67539c2 TL |
822 | def _from_json_impl(cls: Type[ServiceSpecT], json_spec: dict) -> ServiceSpecT: |
823 | args = {} # type: Dict[str, Any] | |
9f95a23c TL |
824 | for k, v in json_spec.items(): |
825 | if k == 'placement': | |
826 | v = PlacementSpec.from_json(v) | |
2a845540 TL |
827 | if k == 'custom_configs': |
828 | v = [CustomConfig.from_json(c) for c in v] | |
9f95a23c TL |
829 | if k == 'spec': |
830 | args.update(v) | |
831 | continue | |
832 | args.update({k: v}) | |
1911f103 | 833 | _cls = cls(**args) |
20effc67 TL |
834 | if _service_spec_from_json_validate: |
835 | _cls.validate() | |
1911f103 | 836 | return _cls |
9f95a23c | 837 | |
f67539c2 | 838 | def service_name(self) -> str: |
9f95a23c TL |
839 | n = self.service_type |
840 | if self.service_id: | |
841 | n += '.' + self.service_id | |
842 | return n | |
843 | ||
f67539c2 TL |
844 | def get_port_start(self) -> List[int]: |
845 | # If defined, we will allocate and number ports starting at this | |
846 | # point. | |
847 | return [] | |
848 | ||
849 | def get_virtual_ip(self) -> Optional[str]: | |
850 | return None | |
851 | ||
    def to_json(self):
        # type: () -> OrderedDict[str, Any]
        """Serialize this spec in the "new" style: well-known keys at the top
        level, all remaining (subclass) attributes nested under ``spec``.

        This is the inverse of :meth:`from_json` and also what the YAML
        representer dumps.
        """
        ret: OrderedDict[str, Any] = OrderedDict()
        # Well-known keys first, in a stable, human-friendly order.
        ret['service_type'] = self.service_type
        if self.service_id:
            ret['service_id'] = self.service_id
        ret['service_name'] = self.service_name()
        if self.placement.to_json():
            ret['placement'] = self.placement.to_json()
        if self.unmanaged:
            ret['unmanaged'] = self.unmanaged
        if self.networks:
            ret['networks'] = self.networks
        if self.extra_container_args:
            ret['extra_container_args'] = ArgumentSpec.map_json(
                self.extra_container_args
            )
        if self.extra_entrypoint_args:
            ret['extra_entrypoint_args'] = ArgumentSpec.map_json(
                self.extra_entrypoint_args
            )
        if self.custom_configs:
            ret['custom_configs'] = [c.to_json() for c in self.custom_configs]

        # Everything else goes under 'spec', sorted by attribute name.
        # NOTE: falsy values are skipped, so explicit 0/False/''/[] settings
        # are not round-tripped through to_json/from_json.
        c = {}
        for key, val in sorted(self.__dict__.items(), key=lambda tpl: tpl[0]):
            if key in ret:
                continue
            if hasattr(val, 'to_json'):
                val = val.to_json()
            if val:
                c[key] = val
        if c:
            ret['spec'] = c
        return ret
9f95a23c | 887 | |
f67539c2 | 888 | def validate(self) -> None: |
9f95a23c | 889 | if not self.service_type: |
f67539c2 | 890 | raise SpecValidationError('Cannot add Service: type required') |
9f95a23c | 891 | |
20effc67 TL |
892 | if self.service_type != 'osd': |
893 | if self.service_type in self.REQUIRES_SERVICE_ID and not self.service_id: | |
f67539c2 | 894 | raise SpecValidationError('Cannot add Service: id required') |
20effc67 TL |
895 | if self.service_type not in self.REQUIRES_SERVICE_ID and self.service_id: |
896 | raise SpecValidationError( | |
897 | f'Service of type \'{self.service_type}\' should not contain a service id') | |
898 | ||
899 | if self.service_id: | |
33c7a0ef | 900 | if not re.match('^[a-zA-Z0-9_.-]+$', str(self.service_id)): |
f67539c2 TL |
901 | raise SpecValidationError('Service id contains invalid characters, ' |
902 | 'only [a-zA-Z0-9_.-] allowed') | |
f6b5b4d7 | 903 | |
9f95a23c TL |
904 | if self.placement is not None: |
905 | self.placement.validate() | |
f67539c2 TL |
906 | if self.config: |
907 | for k, v in self.config.items(): | |
908 | if k in self.MANAGED_CONFIG_OPTIONS: | |
909 | raise SpecValidationError( | |
910 | f'Cannot set config option {k} in spec: it is managed by cephadm' | |
911 | ) | |
912 | for network in self.networks or []: | |
913 | try: | |
914 | ip_network(network) | |
915 | except ValueError as e: | |
916 | raise SpecValidationError( | |
917 | f'Cannot parse network {network}: {e}' | |
918 | ) | |
9f95a23c | 919 | |
    def __repr__(self) -> str:
        """Return an eval-style repr built from the YAML dump of this spec.

        Relies on the ``yaml.add_representer`` registrations in this module,
        which serialize a spec through ``to_json()``.
        """
        y = yaml.dump(cast(dict, self), default_flow_style=False)
        return f"{self.__class__.__name__}.from_json(yaml.safe_load('''{y}'''))"
9f95a23c | 923 | |
f67539c2 | 924 | def __eq__(self, other: Any) -> bool: |
f6b5b4d7 TL |
925 | return (self.__class__ == other.__class__ |
926 | and | |
927 | self.__dict__ == other.__dict__) | |
928 | ||
f67539c2 | 929 | def one_line_str(self) -> str: |
9f95a23c TL |
930 | return '<{} for service_name={}>'.format(self.__class__.__name__, self.service_name()) |
931 | ||
    @staticmethod
    def yaml_representer(dumper: 'yaml.SafeDumper', data: 'ServiceSpec') -> Any:
        """Represent a spec as a plain YAML mapping (its ``to_json()`` form)."""
        return dumper.represent_dict(cast(Mapping, data.to_json().items()))


# Register the representer so `yaml.dump(spec)` emits the same document a
# user would write in a spec file; subclasses register themselves below.
yaml.add_representer(ServiceSpec, ServiceSpec.yaml_representer)
9f95a23c TL |
938 | |
939 | ||
class NFSServiceSpec(ServiceSpec):
    """Spec for an NFS (ganesha) service."""

    def __init__(self,
                 service_type: str = 'nfs',
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 port: Optional[int] = None,
                 virtual_ip: Optional[str] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 enable_haproxy_protocol: bool = False,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'nfs'
        super(NFSServiceSpec, self).__init__(
            'nfs', service_id=service_id,
            placement=placement, unmanaged=unmanaged, preview_only=preview_only,
            config=config, networks=networks, extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args, custom_configs=custom_configs)

        #: TCP port for the NFS daemon (optional)
        self.port = port
        #: virtual IP address, if any (e.g. when fronted by an ingress service)
        self.virtual_ip = virtual_ip
        #: accept the haproxy PROXY protocol header — presumably paired with
        #: the ingress option of the same name; confirm in the cephadm service
        self.enable_haproxy_protocol = enable_haproxy_protocol

    def get_port_start(self) -> List[int]:
        """Return the explicitly configured port, or nothing."""
        return [self.port] if self.port else []

    def rados_config_name(self):
        # type: () -> str
        """Name of the per-service RADOS config object."""
        return 'conf-' + self.service_name()


yaml.add_representer(NFSServiceSpec, ServiceSpec.yaml_representer)
978 | ||
979 | ||
9f95a23c TL |
class RGWSpec(ServiceSpec):
    """
    Settings to configure a (multisite) Ceph RGW

    .. code-block:: yaml

        service_type: rgw
        service_id: myrealm.myzone
        spec:
            rgw_realm: myrealm
            rgw_zonegroup: myzonegroup
            rgw_zone: myzone
            ssl: true
            rgw_frontend_port: 1234
            rgw_frontend_type: beast
            rgw_frontend_ssl_certificate: ...

    See also: :ref:`orchestrator-cli-service-spec`
    """

    # In addition to the base managed options, cephadm also owns the
    # multisite/frontends options for RGW daemons.
    MANAGED_CONFIG_OPTIONS = ServiceSpec.MANAGED_CONFIG_OPTIONS + [
        'rgw_zone',
        'rgw_realm',
        'rgw_zonegroup',
        'rgw_frontends',
    ]

    def __init__(self,
                 service_type: str = 'rgw',
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 rgw_realm: Optional[str] = None,
                 rgw_zonegroup: Optional[str] = None,
                 rgw_zone: Optional[str] = None,
                 rgw_frontend_port: Optional[int] = None,
                 rgw_frontend_ssl_certificate: Optional[List[str]] = None,
                 rgw_frontend_type: Optional[str] = None,
                 rgw_frontend_extra_args: Optional[List[str]] = None,
                 unmanaged: bool = False,
                 ssl: bool = False,
                 preview_only: bool = False,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 subcluster: Optional[str] = None,  # legacy, only for from_json on upgrade
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 rgw_realm_token: Optional[str] = None,
                 update_endpoints: Optional[bool] = False,
                 zone_endpoints: Optional[str] = None  # comma separated endpoints list
                 ):
        assert service_type == 'rgw', service_type

        # for backward compatibility with octopus spec files,
        # derive a service id from realm+zone when none was given
        if not service_id and (rgw_realm and rgw_zone):
            service_id = rgw_realm + '.' + rgw_zone

        # NOTE(review): `subcluster` is accepted (so legacy json round-trips)
        # but intentionally not stored on the instance.
        super(RGWSpec, self).__init__(
            'rgw', service_id=service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config, networks=networks,
            extra_container_args=extra_container_args, extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs)

        #: The RGW realm associated with this service. Needs to be manually created
        #: if the spec is being applied directly to cephadm. In case of rgw module
        #: the realm is created automatically.
        self.rgw_realm: Optional[str] = rgw_realm
        #: The RGW zonegroup associated with this service. Needs to be manually created
        #: if the spec is being applied directly to cephadm. In case of rgw module
        #: the zonegroup is created automatically.
        self.rgw_zonegroup: Optional[str] = rgw_zonegroup
        #: The RGW zone associated with this service. Needs to be manually created
        #: if the spec is being applied directly to cephadm. In case of rgw module
        #: the zone is created automatically.
        self.rgw_zone: Optional[str] = rgw_zone
        #: Port of the RGW daemons
        self.rgw_frontend_port: Optional[int] = rgw_frontend_port
        #: List of SSL certificates
        self.rgw_frontend_ssl_certificate: Optional[List[str]] = rgw_frontend_ssl_certificate
        #: civetweb or beast (default: beast). See :ref:`rgw_frontends`
        self.rgw_frontend_type: Optional[str] = rgw_frontend_type
        #: List of extra arguments for rgw_frontend in the form opt=value. See :ref:`rgw_frontends`
        self.rgw_frontend_extra_args: Optional[List[str]] = rgw_frontend_extra_args
        #: enable SSL
        self.ssl = ssl
        #: token used to join an existing realm (rgw module)
        self.rgw_realm_token = rgw_realm_token
        #: whether to update the zone endpoints (rgw module)
        self.update_endpoints = update_endpoints
        #: comma separated list of zone endpoints (rgw module)
        self.zone_endpoints = zone_endpoints

    def get_port_start(self) -> List[int]:
        """Single starting port: the effective frontend port."""
        return [self.get_port()]

    def get_port(self) -> int:
        """Effective frontend port: explicit setting, else 443/80 by SSL."""
        if self.rgw_frontend_port:
            return self.rgw_frontend_port
        if self.ssl:
            return 443
        else:
            return 80

    def validate(self) -> None:
        """Validate base fields plus RGW-specific realm/zone and frontend type."""
        super(RGWSpec, self).validate()

        # realm and zone must be given together (either both or neither)
        if self.rgw_realm and not self.rgw_zone:
            raise SpecValidationError(
                'Cannot add RGW: Realm specified but no zone specified')
        if self.rgw_zone and not self.rgw_realm:
            raise SpecValidationError('Cannot add RGW: Zone specified but no realm specified')

        if self.rgw_frontend_type is not None:
            if self.rgw_frontend_type not in ['beast', 'civetweb']:
                raise SpecValidationError(
                    'Invalid rgw_frontend_type value. Valid values are: beast, civetweb.\n'
                    'Additional rgw type parameters can be passed using rgw_frontend_extra_args.'
                )


yaml.add_representer(RGWSpec, ServiceSpec.yaml_representer)
1099 | ||
1911f103 | 1100 | |
aee94f69 TL |
class NvmeofServiceSpec(ServiceSpec):
    """Spec for a ceph-nvmeof gateway service."""

    def __init__(self,
                 service_type: str = 'nvmeof',
                 service_id: Optional[str] = None,
                 name: Optional[str] = None,
                 group: Optional[str] = None,
                 port: Optional[int] = None,
                 pool: Optional[str] = None,
                 enable_auth: bool = False,
                 server_key: Optional[str] = None,
                 server_cert: Optional[str] = None,
                 client_key: Optional[str] = None,
                 client_cert: Optional[str] = None,
                 spdk_path: Optional[str] = None,
                 tgt_path: Optional[str] = None,
                 timeout: Optional[int] = 60,
                 conn_retries: Optional[int] = 10,
                 transports: Optional[str] = 'tcp',
                 transport_tcp_options: Optional[Dict[str, int]] =
                 {"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7},
                 tgt_cmd_extra_args: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'nvmeof'
        super(NvmeofServiceSpec, self).__init__('nvmeof', service_id=service_id,
                                                placement=placement, unmanaged=unmanaged,
                                                preview_only=preview_only,
                                                config=config, networks=networks,
                                                extra_container_args=extra_container_args,
                                                extra_entrypoint_args=extra_entrypoint_args,
                                                custom_configs=custom_configs)

        #: RADOS pool where ceph-nvmeof config data is stored.
        self.pool = pool
        #: ``port`` port of the nvmeof gateway
        self.port = port or 5500
        #: ``name`` name of the nvmeof gateway
        self.name = name
        #: ``group`` name of the nvmeof gateway
        self.group = group
        #: ``enable_auth`` enables user authentication on nvmeof gateway
        self.enable_auth = enable_auth
        #: ``server_key`` gateway server key
        self.server_key = server_key or './server.key'
        #: ``server_cert`` gateway server certificate
        self.server_cert = server_cert or './server.crt'
        #: ``client_key`` client key
        self.client_key = client_key or './client.key'
        #: ``client_cert`` client certificate
        self.client_cert = client_cert or './client.crt'
        #: ``spdk_path`` path to SPDK
        #: NOTE(review): default equals tgt_path's default ('/usr/local/bin/nvmf_tgt')
        #: — confirm this is intended rather than an SPDK install dir.
        self.spdk_path = spdk_path or '/usr/local/bin/nvmf_tgt'
        #: ``tgt_path`` nvmeof target path
        self.tgt_path = tgt_path or '/usr/local/bin/nvmf_tgt'
        #: ``timeout`` ceph connectivity timeout
        self.timeout = timeout
        #: ``conn_retries`` ceph connection retries number
        self.conn_retries = conn_retries
        #: ``transports`` tcp
        self.transports = transports
        #: List of extra arguments for transports in the form opt=value.
        # BUGFIX: the signature default is a shared mutable dict; copy it so
        # instances never alias (and can never mutate) the default object.
        self.transport_tcp_options: Optional[Dict[str, int]] = \
            dict(transport_tcp_options) if transport_tcp_options is not None else None
        #: ``tgt_cmd_extra_args`` extra arguments for the nvmf_tgt process
        self.tgt_cmd_extra_args = tgt_cmd_extra_args

    def get_port_start(self) -> List[int]:
        """Ports claimed by the gateway: the configured control port
        (default 5500) plus the fixed nvme-tcp (4420) and discovery (8009)
        ports.
        """
        # BUGFIX: advertise the configured port instead of a hard-coded 5500.
        return [self.port, 4420, 8009]

    def validate(self) -> None:
        # TODO: what other parameters should be validated as part of this function?
        super(NvmeofServiceSpec, self).validate()

        if not self.pool:
            raise SpecValidationError('Cannot add NVMEOF: No Pool specified')

        if self.enable_auth:
            # NOTE(review): __init__ substitutes non-empty defaults for all four
            # key/cert paths, so this check can never fire as written; kept for
            # backward compatibility. TODO: validate the originally supplied
            # values instead.
            if not any([self.server_key, self.server_cert, self.client_key, self.client_cert]):
                raise SpecValidationError(
                    'enable_auth is true but client/server certificates are missing')

        if self.transports not in ['tcp']:
            raise SpecValidationError('Invalid transport. Valid values are tcp')


yaml.add_representer(NvmeofServiceSpec, ServiceSpec.yaml_representer)
1193 | ||
1194 | ||
1911f103 | 1195 | class IscsiServiceSpec(ServiceSpec): |
e306af50 TL |
1196 | def __init__(self, |
1197 | service_type: str = 'iscsi', | |
1198 | service_id: Optional[str] = None, | |
1199 | pool: Optional[str] = None, | |
1200 | trusted_ip_list: Optional[str] = None, | |
39ae355f TL |
1201 | api_port: Optional[int] = 5000, |
1202 | api_user: Optional[str] = 'admin', | |
1203 | api_password: Optional[str] = 'admin', | |
e306af50 TL |
1204 | api_secure: Optional[bool] = None, |
1205 | ssl_cert: Optional[str] = None, | |
1206 | ssl_key: Optional[str] = None, | |
1207 | placement: Optional[PlacementSpec] = None, | |
f6b5b4d7 | 1208 | unmanaged: bool = False, |
f67539c2 TL |
1209 | preview_only: bool = False, |
1210 | config: Optional[Dict[str, str]] = None, | |
1211 | networks: Optional[List[str]] = None, | |
aee94f69 TL |
1212 | extra_container_args: Optional[GeneralArgList] = None, |
1213 | extra_entrypoint_args: Optional[GeneralArgList] = None, | |
2a845540 | 1214 | custom_configs: Optional[List[CustomConfig]] = None, |
e306af50 | 1215 | ): |
1911f103 TL |
1216 | assert service_type == 'iscsi' |
1217 | super(IscsiServiceSpec, self).__init__('iscsi', service_id=service_id, | |
f6b5b4d7 | 1218 | placement=placement, unmanaged=unmanaged, |
f67539c2 | 1219 | preview_only=preview_only, |
33c7a0ef | 1220 | config=config, networks=networks, |
2a845540 | 1221 | extra_container_args=extra_container_args, |
39ae355f | 1222 | extra_entrypoint_args=extra_entrypoint_args, |
2a845540 | 1223 | custom_configs=custom_configs) |
1911f103 TL |
1224 | |
1225 | #: RADOS pool where ceph-iscsi config data is stored. | |
1226 | self.pool = pool | |
a4b75251 | 1227 | #: list of trusted IP addresses |
1911f103 | 1228 | self.trusted_ip_list = trusted_ip_list |
a4b75251 | 1229 | #: ``api_port`` as defined in the ``iscsi-gateway.cfg`` |
1911f103 | 1230 | self.api_port = api_port |
a4b75251 | 1231 | #: ``api_user`` as defined in the ``iscsi-gateway.cfg`` |
1911f103 | 1232 | self.api_user = api_user |
a4b75251 | 1233 | #: ``api_password`` as defined in the ``iscsi-gateway.cfg`` |
1911f103 | 1234 | self.api_password = api_password |
a4b75251 | 1235 | #: ``api_secure`` as defined in the ``iscsi-gateway.cfg`` |
1911f103 | 1236 | self.api_secure = api_secure |
a4b75251 | 1237 | #: SSL certificate |
1911f103 | 1238 | self.ssl_cert = ssl_cert |
a4b75251 | 1239 | #: SSL private key |
1911f103 TL |
1240 | self.ssl_key = ssl_key |
1241 | ||
e306af50 TL |
1242 | if not self.api_secure and self.ssl_cert and self.ssl_key: |
1243 | self.api_secure = True | |
1244 | ||
39ae355f TL |
1245 | def get_port_start(self) -> List[int]: |
1246 | return [self.api_port or 5000] | |
1247 | ||
f67539c2 | 1248 | def validate(self) -> None: |
e306af50 | 1249 | super(IscsiServiceSpec, self).validate() |
1911f103 TL |
1250 | |
1251 | if not self.pool: | |
f67539c2 | 1252 | raise SpecValidationError( |
1911f103 | 1253 | 'Cannot add ISCSI: No Pool specified') |
adb31ebb TL |
1254 | |
1255 | # Do not need to check for api_user and api_password as they | |
1256 | # now default to 'admin' when setting up the gateway url. Older | |
1257 | # iSCSI specs from before this change should be fine as they will | |
1258 | # have been required to have an api_user and api_password set and | |
1259 | # will be unaffected by the new default value. | |
f6b5b4d7 TL |
1260 | |
1261 | ||
1262 | yaml.add_representer(IscsiServiceSpec, ServiceSpec.yaml_representer) | |
1263 | ||
1264 | ||
f67539c2 TL |
class IngressSpec(ServiceSpec):
    """Spec for an ingress service (haproxy + keepalived) placed in front of
    a backend service such as rgw or nfs."""

    def __init__(self,
                 service_type: str = 'ingress',
                 service_id: Optional[str] = None,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 placement: Optional[PlacementSpec] = None,
                 backend_service: Optional[str] = None,
                 frontend_port: Optional[int] = None,
                 ssl_cert: Optional[str] = None,
                 ssl_key: Optional[str] = None,
                 ssl_dh_param: Optional[str] = None,
                 ssl_ciphers: Optional[List[str]] = None,
                 ssl_options: Optional[List[str]] = None,
                 monitor_port: Optional[int] = None,
                 monitor_user: Optional[str] = None,
                 monitor_password: Optional[str] = None,
                 enable_stats: Optional[bool] = None,
                 keepalived_password: Optional[str] = None,
                 virtual_ip: Optional[str] = None,
                 virtual_ips_list: Optional[List[str]] = None,
                 virtual_interface_networks: Optional[List[str]] = [],
                 use_keepalived_multicast: Optional[bool] = False,
                 vrrp_interface_network: Optional[str] = None,
                 first_virtual_router_id: Optional[int] = 50,
                 unmanaged: bool = False,
                 ssl: bool = False,
                 keepalive_only: bool = False,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 enable_haproxy_protocol: bool = False,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'ingress'

        # NOTE: unmanaged/preview_only are not forwarded to the base
        # constructor here; `unmanaged` is applied directly below instead.
        super(IngressSpec, self).__init__(
            'ingress', service_id=service_id,
            placement=placement, config=config,
            networks=networks,
            extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs
        )
        #: name of the service this ingress fronts (e.g. ``rgw.foo``)
        self.backend_service = backend_service
        #: port the haproxy frontend listens on
        self.frontend_port = frontend_port
        #: SSL certificate (haproxy frontend)
        self.ssl_cert = ssl_cert
        #: SSL private key
        self.ssl_key = ssl_key
        #: SSL dh params
        self.ssl_dh_param = ssl_dh_param
        #: list of allowed SSL ciphers
        self.ssl_ciphers = ssl_ciphers
        #: extra SSL options
        self.ssl_options = ssl_options
        #: port of the haproxy stats/monitor endpoint
        self.monitor_port = monitor_port
        #: user for the monitor endpoint
        self.monitor_user = monitor_user
        #: password for the monitor endpoint
        self.monitor_password = monitor_password
        # NOTE(review): `enable_stats` is accepted above but never stored on
        # the instance — confirm whether it should be assigned (and thereby
        # serialized by to_json) or removed from the signature.
        #: password shared by the keepalived instances
        self.keepalived_password = keepalived_password
        #: single virtual IP (mutually exclusive with virtual_ips_list)
        self.virtual_ip = virtual_ip
        #: list of virtual IPs (mutually exclusive with virtual_ip)
        self.virtual_ips_list = virtual_ips_list
        #: networks used to pick the interface for the virtual IP
        self.virtual_interface_networks = virtual_interface_networks or []
        #: use multicast instead of unicast for keepalived VRRP
        self.use_keepalived_multicast = use_keepalived_multicast
        #: network used to pick the VRRP interface
        self.vrrp_interface_network = vrrp_interface_network
        #: base VRRP router id; instances count up from here
        self.first_virtual_router_id = first_virtual_router_id
        self.unmanaged = unmanaged
        #: enable SSL on the frontend
        self.ssl = ssl
        #: deploy only keepalived (no haproxy)
        self.keepalive_only = keepalive_only
        #: send the haproxy PROXY protocol header to the backend
        self.enable_haproxy_protocol = enable_haproxy_protocol

    def get_port_start(self) -> List[int]:
        """Ports to claim: frontend and monitor ports, when configured."""
        ports = []
        if self.frontend_port is not None:
            ports.append(cast(int, self.frontend_port))
        if self.monitor_port is not None:
            ports.append(cast(int, self.monitor_port))
        return ports

    def get_virtual_ip(self) -> Optional[str]:
        """The single configured virtual IP, if any."""
        return self.virtual_ip

    def validate(self) -> None:
        """Base validation plus ingress-specific required fields."""
        super(IngressSpec, self).validate()

        if not self.backend_service:
            raise SpecValidationError(
                'Cannot add ingress: No backend_service specified')
        # frontend_port is only needed when haproxy is deployed
        if not self.keepalive_only and not self.frontend_port:
            raise SpecValidationError(
                'Cannot add ingress: No frontend_port specified')
        # NOTE(review): monitor_port is required even in keepalive_only mode —
        # confirm that is intentional.
        if not self.monitor_port:
            raise SpecValidationError(
                'Cannot add ingress: No monitor_port specified')
        if not self.virtual_ip and not self.virtual_ips_list:
            raise SpecValidationError(
                'Cannot add ingress: No virtual_ip provided')
        if self.virtual_ip is not None and self.virtual_ips_list is not None:
            raise SpecValidationError(
                'Cannot add ingress: Single and multiple virtual IPs specified')


yaml.add_representer(IngressSpec, ServiceSpec.yaml_representer)
1362 | ||
1363 | ||
f91f0fd5 TL |
class CustomContainerSpec(ServiceSpec):
    """Spec for deploying an arbitrary user-supplied container image."""

    def __init__(self,
                 service_type: str = 'container',
                 service_id: Optional[str] = None,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 image: Optional[str] = None,
                 entrypoint: Optional[str] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 uid: Optional[int] = None,
                 gid: Optional[int] = None,
                 volume_mounts: Optional[Dict[str, str]] = {},
                 # args are for the container runtime, not entrypoint
                 args: Optional[GeneralArgList] = [],
                 envs: Optional[List[str]] = [],
                 privileged: Optional[bool] = False,
                 bind_mounts: Optional[List[List[str]]] = None,
                 ports: Optional[List[int]] = [],
                 dirs: Optional[List[str]] = [],
                 files: Optional[Dict[str, Any]] = {},
                 ):
        assert service_type == 'container'
        # NOTE: asserts are stripped under `python -O`; these guard required
        # arguments and predate explicit SpecValidationError handling.
        assert service_id is not None
        assert image is not None

        super(CustomContainerSpec, self).__init__(
            service_type, service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config,
            networks=networks, extra_entrypoint_args=extra_entrypoint_args)

        #: full image name to deploy
        self.image = image
        #: container entrypoint override
        self.entrypoint = entrypoint
        #: uid to run the container as
        self.uid = uid
        #: gid to run the container as
        self.gid = gid
        # BUGFIX: the signature defaults above include shared mutable objects
        # ({} / []); copy all container-valued arguments so instances never
        # alias the defaults (or the caller's objects) and cannot corrupt
        # them by mutation.  `None` is preserved as-is so config_json()
        # continues to omit unset properties.
        #: host-path -> container-path mounts
        self.volume_mounts = dict(volume_mounts) if volume_mounts is not None else None
        #: extra arguments for the container runtime
        self.args = list(args) if args is not None else None
        #: environment variables ("KEY=value" strings)
        self.envs = list(envs) if envs is not None else None
        #: run the container privileged
        self.privileged = privileged
        #: raw bind-mount argument lists
        self.bind_mounts = bind_mounts
        #: ports to map/expose
        self.ports = list(ports) if ports is not None else None
        #: directories to create under the daemon's data dir
        self.dirs = list(dirs) if dirs is not None else None
        #: files to create under the daemon's data dir (name -> content)
        self.files = dict(files) if files is not None else None

    def config_json(self) -> Dict[str, Any]:
        """
        Helper function to get the value of the `--config-json` cephadm
        command line option. It will contain all specification properties
        that haven't a `None` value. Such properties will get default
        values in cephadm.
        :return: Returns a dictionary containing all specification
        properties.
        """
        config_json = {}
        for prop in ['image', 'entrypoint', 'uid', 'gid', 'args',
                     'envs', 'volume_mounts', 'privileged',
                     'bind_mounts', 'ports', 'dirs', 'files']:
            value = getattr(self, prop)
            if value is not None:
                config_json[prop] = value
        return config_json


yaml.add_representer(CustomContainerSpec, ServiceSpec.yaml_representer)
b3b6e05e TL |
1431 | |
1432 | ||
class MonitoringSpec(ServiceSpec):
    """Common base spec for the monitoring-stack daemons."""

    def __init__(self,
                 service_type: str,
                 service_id: Optional[str] = None,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 port: Optional[int] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type in ['grafana', 'node-exporter', 'prometheus', 'alertmanager',
                                'loki', 'promtail']

        super(MonitoringSpec, self).__init__(
            service_type, service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config,
            networks=networks, extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs)

        self.service_type = service_type
        #: explicit daemon port; falls back to the per-type default
        self.port = port

    def get_port_start(self) -> List[int]:
        """Single starting port: the effective daemon port."""
        return [self.get_port()]

    def get_port(self) -> int:
        """The configured port, or the well-known default for this daemon."""
        defaults = {'prometheus': 9095,
                    'node-exporter': 9100,
                    'alertmanager': 9093,
                    'grafana': 3000,
                    'loki': 3100,
                    'promtail': 9080}
        return self.port or defaults[self.service_type]


yaml.add_representer(MonitoringSpec, ServiceSpec.yaml_representer)
1477 | ||
1478 | ||
class AlertManagerSpec(MonitoringSpec):
    """Spec for the alertmanager monitoring daemon.

    ``user_data`` carries free-form customization, e.g.::

        service_type: alertmanager
        service_id: xyz
        user_data:
          default_webhook_urls:
            - "https://foo"
            - "https://bar"

    where ``default_webhook_urls`` is a list of additional URLs that are
    added to the default receivers' <webhook_configs> configuration.
    """

    def __init__(self,
                 service_type: str = 'alertmanager',
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 user_data: Optional[Dict[str, Any]] = None,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 port: Optional[int] = None,
                 secure: bool = False,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'alertmanager'
        super().__init__(
            'alertmanager', service_id=service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config, networks=networks, port=port,
            extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs)
        self.user_data = user_data or {}
        self.secure = secure

    def get_port_start(self) -> List[int]:
        # Port 9094 (the cluster/gossip listen port) is always claimed in
        # addition to the web port.
        return [self.get_port(), 9094]

    def validate(self) -> None:
        """Run base validation and reject the reserved cluster port."""
        super().validate()
        if self.port == 9094:
            raise SpecValidationError(
                'Port 9094 is reserved for AlertManager cluster listen address')


yaml.add_representer(AlertManagerSpec, ServiceSpec.yaml_representer)
1532 | ||
1533 | ||
class GrafanaSpec(MonitoringSpec):
    """Spec for the grafana monitoring daemon."""

    def __init__(self,
                 service_type: str = 'grafana',
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 port: Optional[int] = None,
                 protocol: Optional[str] = 'https',
                 initial_admin_password: Optional[str] = None,
                 anonymous_access: Optional[bool] = True,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'grafana'
        super().__init__(
            'grafana', service_id=service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config, networks=networks, port=port,
            extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs)

        self.initial_admin_password = initial_admin_password
        self.anonymous_access = anonymous_access
        self.protocol = protocol

    def validate(self) -> None:
        """Run base validation, then check protocol and login settings."""
        super().validate()

        if self.protocol not in ('http', 'https'):
            raise SpecValidationError(
                f"Invalid protocol '{self.protocol}'. Valid values are: 'http', 'https'.")

        # Without anonymous access an admin password is the only way in.
        if not (self.anonymous_access or self.initial_admin_password):
            raise SpecValidationError(
                'Either initial_admin_password must be set or anonymous_access '
                'must be set to true. Otherwise the grafana dashboard will '
                'be inaccessible.')


yaml.add_representer(GrafanaSpec, ServiceSpec.yaml_representer)
1577 | ||
1578 | ||
39ae355f TL |
class PrometheusSpec(MonitoringSpec):
    """Spec for the prometheus monitoring daemon, including optional
    TSDB retention limits."""

    def __init__(self,
                 service_type: str = 'prometheus',
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 port: Optional[int] = None,
                 retention_time: Optional[str] = None,
                 retention_size: Optional[str] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'prometheus'
        super().__init__(
            'prometheus', service_id=service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config, networks=networks, port=port,
            extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs)

        # Normalize surrounding whitespace; empty/None collapses to None.
        self.retention_time = retention_time.strip() if retention_time else None
        self.retention_size = retention_size.strip() if retention_size else None

    @staticmethod
    def _check_retention(value: str, valid_units: List[str], kind: str) -> None:
        # A retention value is an integer immediately followed by one of the
        # allowed unit suffixes, e.g. '15d' or '500MB'.
        if not re.search(rf"^(\d+)({'|'.join(valid_units)})$", value):
            units = ', '.join(valid_units)
            raise SpecValidationError(f"Invalid retention {kind}. Valid units are: {units}")

    def validate(self) -> None:
        """Run base validation, then check the retention value formats."""
        super().validate()

        if self.retention_time:
            self._check_retention(self.retention_time,
                                  ['y', 'w', 'd', 'h', 'm', 's'], 'time')
        if self.retention_size:
            self._check_retention(self.retention_size,
                                  ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'], 'size')


yaml.add_representer(PrometheusSpec, ServiceSpec.yaml_representer)
1624 | ||
1625 | ||
20effc67 TL |
class SNMPGatewaySpec(ServiceSpec):
    """Spec for the snmp-gateway service, which converts alerts into SNMP
    (V2c or V3) notifications sent to ``snmp_destination``.
    """

    class SNMPVersion(str, enum.Enum):
        V2c = 'V2c'
        V3 = 'V3'

        def to_json(self) -> str:
            return self.value

    class SNMPAuthType(str, enum.Enum):
        MD5 = 'MD5'
        SHA = 'SHA'

        def to_json(self) -> str:
            return self.value

    class SNMPPrivacyType(str, enum.Enum):
        DES = 'DES'
        AES = 'AES'

        def to_json(self) -> str:
            return self.value

    # Destination formats (as classified by valid_addr()) that the gateway
    # accepts for snmp_destination.
    valid_destination_types = [
        'Name:Port',
        'IPv4:Port'
    ]

    def __init__(self,
                 service_type: str = 'snmp-gateway',
                 snmp_version: Optional[SNMPVersion] = None,
                 snmp_destination: str = '',
                 credentials: Optional[Dict[str, str]] = None,
                 engine_id: Optional[str] = None,
                 auth_protocol: Optional[SNMPAuthType] = None,
                 privacy_protocol: Optional[SNMPPrivacyType] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 port: Optional[int] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'snmp-gateway'

        super(SNMPGatewaySpec, self).__init__(
            service_type,
            placement=placement,
            unmanaged=unmanaged,
            preview_only=preview_only,
            extra_container_args=extra_container_args,
            extra_entrypoint_args=extra_entrypoint_args,
            custom_configs=custom_configs)

        self.service_type = service_type
        self.snmp_version = snmp_version
        self.snmp_destination = snmp_destination
        self.port = port
        # FIX: the signature previously used a mutable default ({}), so every
        # instance created without credentials aliased one shared dict.  Keep
        # the external behavior (an empty dict by default) but make it fresh
        # per instance.
        self.credentials = credentials if credentials is not None else {}
        self.engine_id = engine_id
        self.auth_protocol = auth_protocol
        self.privacy_protocol = privacy_protocol

    @classmethod
    def _from_json_impl(cls, json_spec: dict) -> 'SNMPGatewaySpec':
        """Build a spec from JSON, converting the enum-valued fields from
        their string form and rejecting unsupported values."""
        cpy = json_spec.copy()
        types = [
            ('snmp_version', SNMPGatewaySpec.SNMPVersion),
            ('auth_protocol', SNMPGatewaySpec.SNMPAuthType),
            ('privacy_protocol', SNMPGatewaySpec.SNMPPrivacyType),
        ]
        # The enum fields may sit at the top level or nested under 'spec'.
        for d in cpy, cpy.get('spec', {}):
            for key, enum_cls in types:
                try:
                    if key in d:
                        d[key] = enum_cls(d[key])
                except ValueError:
                    raise SpecValidationError(f'{key} unsupported. Must be one of '
                                              f'{", ".join(enum_cls)}')
        return super(SNMPGatewaySpec, cls)._from_json_impl(cpy)

    @property
    def ports(self) -> List[int]:
        # 9464 is the gateway's default listen port.
        return [self.port or 9464]

    def get_port_start(self) -> List[int]:
        return self.ports

    def validate(self) -> None:
        """Run base validation, then check credentials, engine id and
        destination address for the selected SNMP version.

        :raises SpecValidationError: on any missing or malformed field.
        """
        super(SNMPGatewaySpec, self).validate()

        if not self.credentials:
            raise SpecValidationError(
                'Missing authentication information (credentials). '
                'SNMP V2c and V3 require credential information'
            )
        elif not self.snmp_version:
            raise SpecValidationError(
                'Missing SNMP version (snmp_version)'
            )

        creds_requirement = {
            'V2c': ['snmp_community'],
            'V3': ['snmp_v3_auth_username', 'snmp_v3_auth_password']
        }
        # Privacy (encryption) additionally needs its own password.
        if self.privacy_protocol:
            creds_requirement['V3'].append('snmp_v3_priv_password')

        missing = [parm for parm in creds_requirement[self.snmp_version]
                   if parm not in self.credentials]
        # check that credentials are correct for the version
        if missing:
            raise SpecValidationError(
                f'SNMP {self.snmp_version} credentials are incomplete. Missing {", ".join(missing)}'
            )

        if self.engine_id:
            # Must be an even-length hex string of 10-64 characters.
            if 10 <= len(self.engine_id) <= 64 and \
                    is_hex(self.engine_id) and \
                    len(self.engine_id) % 2 == 0:
                pass
            else:
                raise SpecValidationError(
                    'engine_id must be a string containing 10-64 hex characters. '
                    'Its length must be divisible by 2'
                )

        else:
            if self.snmp_version == 'V3':
                raise SpecValidationError(
                    'Must provide an engine_id for SNMP V3 notifications'
                )

        if not self.snmp_destination:
            raise SpecValidationError(
                'SNMP destination (snmp_destination) must be provided'
            )
        else:
            valid, description = valid_addr(self.snmp_destination)
            if not valid:
                raise SpecValidationError(
                    f'SNMP destination (snmp_destination) is invalid: {description}'
                )
            if description not in self.valid_destination_types:
                raise SpecValidationError(
                    f'SNMP destination (snmp_destination) type ({description}) is invalid. '
                    f'Must be either: {", ".join(sorted(self.valid_destination_types))}'
                )


yaml.add_representer(SNMPGatewaySpec, ServiceSpec.yaml_representer)
33c7a0ef TL |
1778 | |
1779 | ||
class MDSSpec(ServiceSpec):
    """Spec for MDS (CephFS metadata server) daemons."""

    def __init__(self,
                 service_type: str = 'mds',
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 config: Optional[Dict[str, str]] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 extra_container_args: Optional[GeneralArgList] = None,
                 extra_entrypoint_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 ):
        assert service_type == 'mds'
        super(MDSSpec, self).__init__('mds', service_id=service_id,
                                      placement=placement,
                                      config=config,
                                      unmanaged=unmanaged,
                                      preview_only=preview_only,
                                      extra_container_args=extra_container_args,
                                      extra_entrypoint_args=extra_entrypoint_args,
                                      custom_configs=custom_configs)

    def validate(self) -> None:
        """Run base validation, then reject ids starting with a digit
        (such daemon names are ambiguous for MDS).

        :raises SpecValidationError: if service_id starts with a digit.
        """
        super(MDSSpec, self).validate()

        # FIX: use [:1] rather than [0] so an empty-string service_id does
        # not raise IndexError here (''.isdigit() is simply False, leaving
        # any empty-id handling to the base validation).
        if str(self.service_id)[:1].isdigit():
            raise SpecValidationError('MDS service id cannot start with a numeric digit')


yaml.add_representer(MDSSpec, ServiceSpec.yaml_representer)
2a845540 TL |
1810 | |
1811 | ||
1e59de90 TL |
class MONSpec(ServiceSpec):
    """Spec for monitor daemons, optionally carrying per-host CRUSH
    locations (hostname -> list of 'bucket=location' strings)."""

    def __init__(self,
                 service_type: str,
                 service_id: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 count: Optional[int] = None,
                 config: Optional[Dict[str, str]] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 networks: Optional[List[str]] = None,
                 extra_container_args: Optional[GeneralArgList] = None,
                 custom_configs: Optional[List[CustomConfig]] = None,
                 crush_locations: Optional[Dict[str, List[str]]] = None,
                 ):
        assert service_type == 'mon'
        super().__init__('mon', service_id=service_id,
                         placement=placement,
                         count=count,
                         config=config,
                         unmanaged=unmanaged,
                         preview_only=preview_only,
                         networks=networks,
                         extra_container_args=extra_container_args,
                         custom_configs=custom_configs)

        self.crush_locations = crush_locations
        self.validate()

    def validate(self) -> None:
        """Check the crush_locations mapping, if one was supplied.

        NOTE(review): unlike the sibling specs this does not invoke
        super().validate() — presumably intentional; confirm before changing.
        """
        if not self.crush_locations:
            return
        for host, crush_locs in self.crush_locations.items():
            try:
                assert_valid_host(host)
            except SpecValidationError as e:
                raise SpecValidationError(
                    f'Invalid hostname found in spec crush locations: {e}')
            for cloc in crush_locs:
                # Each entry must be exactly one '<bucket>=<location>' pair.
                if '=' not in cloc or len(cloc.split('=')) != 2:
                    raise SpecValidationError(
                        'Crush locations must be of form <bucket>=<location>. '
                        f'Found crush location: {cloc}')


yaml.add_representer(MONSpec, ServiceSpec.yaml_representer)
1856 | ||
1857 | ||
class TracingSpec(ServiceSpec):
    """Spec for the Jaeger distributed-tracing stack.

    A 'jaeger-tracing' meta spec expands (via get_tracing_specs) into the
    individual component specs listed in SERVICE_TYPES.
    """

    SERVICE_TYPES = ['elasticsearch', 'jaeger-collector', 'jaeger-query', 'jaeger-agent']

    # Default listen port for each tracing component.
    _DEFAULT_PORTS: Dict[str, int] = {
        'elasticsearch': 9200,
        'jaeger-agent': 6799,
        'jaeger-collector': 14250,
        'jaeger-query': 16686,
    }

    def __init__(self,
                 service_type: str,
                 es_nodes: Optional[str] = None,
                 without_query: bool = False,
                 service_id: Optional[str] = None,
                 config: Optional[Dict[str, str]] = None,
                 networks: Optional[List[str]] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False
                 ):
        assert service_type in TracingSpec.SERVICE_TYPES + ['jaeger-tracing']

        super().__init__(
            service_type, service_id,
            placement=placement, unmanaged=unmanaged,
            preview_only=preview_only, config=config,
            networks=networks)
        self.without_query = without_query
        self.es_nodes = es_nodes

    def get_port_start(self) -> List[int]:
        return [self.get_port()]

    def get_port(self) -> int:
        """Return the default port of this tracing component."""
        return TracingSpec._DEFAULT_PORTS[self.service_type]

    def get_tracing_specs(self) -> List[ServiceSpec]:
        """Expand a 'jaeger-tracing' meta spec into per-component specs."""
        assert self.service_type == 'jaeger-tracing'

        # Start with every component (no placement), then prune/adjust.
        daemons: Dict[str, Optional[PlacementSpec]] = dict.fromkeys(
            TracingSpec.SERVICE_TYPES)
        if self.es_nodes:
            # An external elasticsearch cluster was given; don't deploy one.
            daemons.pop('elasticsearch')
        if self.without_query:
            daemons.pop('jaeger-query')
        if self.placement:
            # The meta spec's placement applies to the collector.
            daemons['jaeger-collector'] = self.placement

        return [
            TracingSpec(service_type=daemon,
                        es_nodes=self.es_nodes,
                        placement=daemon_placement,
                        unmanaged=self.unmanaged,
                        config=self.config,
                        networks=self.networks,
                        preview_only=self.preview_only
                        )
            for daemon, daemon_placement in daemons.items()
        ]


yaml.add_representer(TracingSpec, ServiceSpec.yaml_representer)
1917 | ||
1918 | ||
2a845540 TL |
class TunedProfileSpec():
    """Spec describing a tuned (sysctl) profile applied to matching hosts."""

    def __init__(self,
                 profile_name: str,
                 placement: Optional[PlacementSpec] = None,
                 settings: Optional[Dict[str, str]] = None,
                 ):
        self.profile_name = profile_name
        # Default to every host when no placement is given.
        self.placement = placement or PlacementSpec(host_pattern='*')
        self.settings = settings or {}
        # Bookkeeping for the last time this profile was (re)applied.
        self._last_updated: str = ''

    @classmethod
    def from_json(cls, spec: Dict[str, Any]) -> 'TunedProfileSpec':
        """Build a spec from a JSON dict; 'profile_name' is mandatory."""
        if 'profile_name' not in spec:
            raise SpecValidationError('Tuned profile spec must include "profile_name" field')
        name = spec['profile_name']
        if not isinstance(name, str):
            raise SpecValidationError('"profile_name" field must be a string')
        kwargs: Dict[str, Any] = {'profile_name': name}
        if 'placement' in spec:
            kwargs['placement'] = PlacementSpec.from_json(spec['placement'])
        if 'settings' in spec:
            kwargs['settings'] = spec['settings']
        return cls(**kwargs)

    def to_json(self) -> Dict[str, Any]:
        return {
            'profile_name': self.profile_name,
            'placement': self.placement.to_json(),
            'settings': self.settings,
        }

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, TunedProfileSpec):
            return (self.placement == other.placement
                    and self.profile_name == other.profile_name
                    and self.settings == other.settings)
        return NotImplemented

    def __repr__(self) -> str:
        return f'TunedProfile({self.profile_name})'

    def copy(self) -> 'TunedProfileSpec':
        # Copy the settings dict so edits to one spec's settings don't leak
        # into the other (mostly for testing); placement is shared.
        return TunedProfileSpec(self.profile_name, self.placement, self.settings.copy())
39ae355f TL |
1969 | |
1970 | ||
class CephExporterSpec(ServiceSpec):
    """Spec for the ceph-exporter daemon, which exposes local daemon
    performance counters for scraping."""

    def __init__(self,
                 service_type: str = 'ceph-exporter',
                 sock_dir: Optional[str] = None,
                 addrs: str = '',
                 port: Optional[int] = None,
                 prio_limit: Optional[int] = 5,
                 stats_period: Optional[int] = 5,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 extra_container_args: Optional[GeneralArgList] = None,
                 ):
        assert service_type == 'ceph-exporter'

        super().__init__(
            service_type,
            placement=placement,
            unmanaged=unmanaged,
            preview_only=preview_only,
            extra_container_args=extra_container_args)

        self.service_type = service_type
        self.sock_dir = sock_dir
        self.addrs = addrs
        self.port = port
        self.prio_limit = prio_limit
        self.stats_period = stats_period

    def validate(self) -> None:
        """Run base validation, then type-check the numeric options."""
        super().validate()

        for attr, val in (('prio_limit', self.prio_limit),
                          ('stats_period', self.stats_period)):
            if not isinstance(val, int):
                raise SpecValidationError(
                    f'{attr} must be an integer. Got {type(val)}')


yaml.add_representer(CephExporterSpec, ServiceSpec.yaml_representer)