]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/cephadm/services/cephadmservice.py
1cc564a66d7ec5e4649d3f76aaca94047896e71d
[ceph.git] / ceph / src / pybind / mgr / cephadm / services / cephadmservice.py
1 import json
2 import re
3 import logging
4 import subprocess
5 from abc import ABCMeta, abstractmethod
6 from typing import TYPE_CHECKING, List, Callable, Any, TypeVar, Generic, \
7 Optional, Dict, Any, Tuple, NewType
8
9 from mgr_module import HandleCommandResult, MonCommandFailed
10
11 from ceph.deployment.service_spec import ServiceSpec, RGWSpec
12 from ceph.deployment.utils import is_ipv6, unwrap_ipv6
13 from orchestrator import OrchestratorError, DaemonDescription
14 from cephadm import utils
15
16 if TYPE_CHECKING:
17 from cephadm.module import CephadmOrchestrator
18
19 logger = logging.getLogger(__name__)
20
21 ServiceSpecs = TypeVar('ServiceSpecs', bound=ServiceSpec)
22 AuthEntity = NewType('AuthEntity', str)
23
24
class CephadmDaemonSpec(Generic[ServiceSpecs]):
    # typing.NamedTuple + Generic is broken in py36
    def __init__(self, host: str, daemon_id: str,
                 spec: Optional[ServiceSpecs] = None,
                 network: Optional[str] = None,
                 keyring: Optional[str] = None,
                 extra_args: Optional[List[str]] = None,
                 ceph_conf: str = '',
                 extra_files: Optional[Dict[str, Any]] = None,
                 daemon_type: Optional[str] = None,
                 ports: Optional[List[int]] = None,):
        """
        Used for
        * deploying new daemons. then everything is set
        * redeploying existing daemons, then only the first three attrs are set.

        Would be great to have a consistent usage where all properties are set.
        """
        self.host: str = host
        self.daemon_id = daemon_id
        # daemon_type falls back to the spec's service_type; one of the two
        # must be provided.
        daemon_type = daemon_type or (spec.service_type if spec else None)
        assert daemon_type is not None
        self.daemon_type: str = daemon_type

        # would be great to have the spec always available:
        self.spec: Optional[ServiceSpecs] = spec

        # mons
        self.network = network

        # for run_cephadm.
        self.keyring: Optional[str] = keyring

        # For run_cephadm. Would be great to have more expressive names.
        self.extra_args: List[str] = extra_args or []

        self.ceph_conf = ceph_conf
        self.extra_files = extra_files or {}

        # TCP ports used by the daemon
        self.ports: List[int] = ports or []

    def name(self) -> str:
        """Return the daemon name in the canonical '<type>.<id>' form."""
        return '%s.%s' % (self.daemon_type, self.daemon_id)

    def config_get_files(self) -> Dict[str, Any]:
        """Return extra_files plus the ceph.conf payload (if any).

        BUG FIX: the original returned `self.extra_files` itself and then
        inserted the 'config' key into it, permanently mutating the stored
        dict as a side effect of a getter.  Work on a copy instead.
        """
        files = dict(self.extra_files)
        if self.ceph_conf:
            files['config'] = self.ceph_conf

        return files
76
77
class CephadmService(metaclass=ABCMeta):
    """
    Base class for service types. Often providing a create() and config() fn.
    """

    @property
    @abstractmethod
    def TYPE(self) -> str:
        """Service type string handled by the subclass (e.g. 'mon', 'rgw')."""
        pass

    def __init__(self, mgr: "CephadmOrchestrator"):
        self.mgr: "CephadmOrchestrator" = mgr

    def make_daemon_spec(self, host: str,
                         daemon_id: str,
                         netowrk: str,
                         spec: ServiceSpecs) -> CephadmDaemonSpec:
        """Build a CephadmDaemonSpec for a daemon of this service.

        NOTE(review): 'netowrk' is a typo for 'network'; the name is kept
        because it is caller-visible (keyword arguments) — rename in a
        coordinated change with all call sites.
        """
        return CephadmDaemonSpec(
            host=host,
            daemon_id=daemon_id,
            spec=spec,
            network=netowrk
        )

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """Populate daemon_spec (keyring, conf, ports, ...) before deployment."""
        raise NotImplementedError()

    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        """Return the (cephadm config dict, dependency list) for the daemon."""
        raise NotImplementedError()

    def daemon_check_post(self, daemon_descrs: List[DaemonDescription]) -> None:
        """The post actions needed to be done after daemons are checked"""
        if self.mgr.config_dashboard:
            if 'dashboard' in self.mgr.get('mgr_map')['modules']:
                self.config_dashboard(daemon_descrs)
            else:
                logger.debug('Dashboard is not enabled. Skip configuration.')

    def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None:
        """Config dashboard settings."""
        raise NotImplementedError()

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        # if this is called for a service type where it hasn't explicitly been
        # defined, return empty Daemon Desc
        return DaemonDescription()

    def _inventory_get_addr(self, hostname: str) -> str:
        """Get a host's address with its hostname."""
        return self.mgr.inventory.get_addr(hostname)

    def _set_service_url_on_dashboard(self,
                                      service_name: str,
                                      get_mon_cmd: str,
                                      set_mon_cmd: str,
                                      service_url: str) -> None:
        """A helper to get and set service_url via Dashboard's MON command.

        If result of get_mon_cmd differs from service_url, set_mon_cmd will
        be sent to set the service_url.
        """
        def get_set_cmd_dicts(out: str) -> List[dict]:
            cmd_dict = {
                'prefix': set_mon_cmd,
                'value': service_url
            }
            # only issue the set command when the stored value differs
            return [cmd_dict] if service_url != out else []

        self._check_and_set_dashboard(
            service_name=service_name,
            get_cmd=get_mon_cmd,
            get_set_cmd_dicts=get_set_cmd_dicts
        )

    def _check_and_set_dashboard(self,
                                 service_name: str,
                                 get_cmd: str,
                                 get_set_cmd_dicts: Callable[[str], List[dict]]) -> None:
        """A helper to set configs in the Dashboard.

        The method is useful for the pattern:
        - Getting a config from Dashboard by using a Dashboard command. e.g. current iSCSI
          gateways.
        - Parse or deserialize previous output. e.g. Dashboard command returns a JSON string.
        - Determine if the config need to be update. NOTE: This step is important because if a
          Dashboard command modified Ceph config, cephadm's config_notify() is called. Which
          kicks the serve() loop and the logic using this method is likely to be called again.
          A config should be updated only when needed.
        - Update a config in Dashboard by using a Dashboard command.

        :param service_name: the service name to be used for logging
        :type service_name: str
        :param get_cmd: Dashboard command prefix to get config. e.g. dashboard get-grafana-api-url
        :type get_cmd: str
        :param get_set_cmd_dicts: function to create a list, and each item is a command dictionary.
                                  The function should return empty list if no command need to be sent.
        :type get_set_cmd_dicts: Callable[[str], List[dict]]
        """

        try:
            _, out, _ = self.mgr.check_mon_command({
                'prefix': get_cmd
            })
        except MonCommandFailed as e:
            logger.warning('Failed to get Dashboard config for %s: %s', service_name, e)
            return
        cmd_dicts = get_set_cmd_dicts(out.strip())
        # iterate directly; the original wrapped this in a redundant list()
        # copy and pointlessly rebound `out` with each command's output
        for cmd_dict in cmd_dicts:
            try:
                self.mgr.check_mon_command(cmd_dict)
            except MonCommandFailed as e:
                logger.warning('Failed to set Dashboard config for %s: %s', service_name, e)

    def ok_to_stop(self, daemon_ids: List[str]) -> HandleCommandResult:
        """Ask the cluster whether the given daemons of this type may stop.

        For types without an 'ok-to-stop' command (anything but mon/osd/mds)
        stopping is presumed safe.
        """
        names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids]
        out = f'It is presumed safe to stop {names}'
        err = f'It is NOT safe to stop {names}'

        if self.TYPE not in ['mon', 'osd', 'mds']:
            logger.info(out)
            return HandleCommandResult(0, out, None)

        r = HandleCommandResult(*self.mgr.mon_command({
            'prefix': f'{self.TYPE} ok-to-stop',
            'ids': daemon_ids,
        }))

        if r.retval:
            err = f'{err}: {r.stderr}' if r.stderr else err
            logger.error(err)
            return HandleCommandResult(r.retval, r.stdout, err)

        out = f'{out}: {r.stdout}' if r.stdout else out
        logger.info(out)
        return HandleCommandResult(r.retval, out, r.stderr)

    def pre_remove(self, daemon: DaemonDescription) -> None:
        """
        Called before the daemon is removed.
        """
        assert self.TYPE == daemon.daemon_type
        logger.debug(f'Pre remove daemon {self.TYPE}.{daemon.daemon_id}')

    def post_remove(self, daemon: DaemonDescription) -> None:
        """
        Called after the daemon is removed.
        """
        assert self.TYPE == daemon.daemon_type
        logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')
240
241
class CephService(CephadmService):
    """Base for Ceph's own daemons (mon, mgr, mds, osd, rgw, ...); supplies
    config+keyring generation and cephx keyring cleanup."""

    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        """Return the cephadm config payload (conf, keyring, extra files)."""
        # Ceph.daemons (mon, mgr, mds, osd, etc)
        cephadm_config = self.get_config_and_keyring(
            daemon_spec.daemon_type,
            daemon_spec.daemon_id,
            host=daemon_spec.host,
            keyring=daemon_spec.keyring,
            extra_ceph_config=daemon_spec.ceph_conf)

        if daemon_spec.config_get_files():
            cephadm_config.update({'files': daemon_spec.config_get_files()})

        return cephadm_config, []

    def post_remove(self, daemon: DaemonDescription) -> None:
        """After the daemon is gone, also drop its cephx keyring."""
        super().post_remove(daemon)
        self.remove_keyring(daemon)

    def get_auth_entity(self, daemon_id: str, host: str = "") -> AuthEntity:
        """
        Map the daemon id to a cephx keyring entity name
        """
        if self.TYPE in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
            return AuthEntity(f'client.{self.TYPE}.{daemon_id}')
        elif self.TYPE == 'crash':
            # crash keyrings are scoped per host, not per daemon id
            if host == "":
                raise OrchestratorError("Host not provided to generate <crash> auth entity name")
            return AuthEntity(f'client.{self.TYPE}.{host}')
        elif self.TYPE == 'mon':
            return AuthEntity('mon.')
        elif self.TYPE in ['mgr', 'osd', 'mds']:
            return AuthEntity(f'{self.TYPE}.{daemon_id}')
        else:
            raise OrchestratorError("unknown daemon type")

    def get_config_and_keyring(self,
                               daemon_type: str,
                               daemon_id: str,
                               host: str,
                               keyring: Optional[str] = None,
                               extra_ceph_config: Optional[str] = None
                               ) -> Dict[str, Any]:
        """Return {'config': ..., 'keyring': ...} for run_cephadm.

        Fetches the keyring from the mons unless one is supplied; appends
        extra_ceph_config to the minimal ceph.conf when given.
        """
        # keyring
        if not keyring:
            entity: AuthEntity = self.get_auth_entity(daemon_id, host=host)
            ret, keyring, err = self.mgr.check_mon_command({
                'prefix': 'auth get',
                'entity': entity,
            })

        config = self.mgr.get_minimal_ceph_conf()

        if extra_ceph_config:
            config += extra_ceph_config

        return {
            'config': config,
            'keyring': keyring,
        }

    def remove_keyring(self, daemon: DaemonDescription) -> None:
        """Remove the daemon's cephx key from the cluster's auth database."""
        daemon_id: str = daemon.daemon_id
        host: str = daemon.hostname

        # BUG FIX: the original compared daemon_id == 'mon', but a mon's
        # daemon_id is its name (e.g. 'a'), so the guard never fired and the
        # shared 'mon.' keyring could be removed.  Check the daemon type.
        if daemon.daemon_type == 'mon':
            # do not remove the mon keyring
            return

        entity = self.get_auth_entity(daemon_id, host=host)

        logger.info(f'Remove keyring: {entity}')
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': entity,
        })
318
319
class MonService(CephService):
    TYPE = 'mon'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """
        Create a new monitor on the given host.

        Fills in the mon keyring and a per-mon ceph.conf snippet carrying the
        public network/addr, taken from the spec or the cluster config.
        """
        assert self.TYPE == daemon_spec.daemon_type
        name, host, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network

        # get mon. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get',
            'entity': self.get_auth_entity(name),
        })

        extra_config = '[mon.%s]\n' % name
        if network:
            # infer whether this is a CIDR network, addrvec, or plain IP
            if '/' in network:
                extra_config += 'public network = %s\n' % network
            elif network.startswith('[v') and network.endswith(']'):
                extra_config += 'public addrv = %s\n' % network
            elif is_ipv6(network):
                extra_config += 'public addr = %s\n' % unwrap_ipv6(network)
            elif ':' not in network:
                extra_config += 'public addr = %s\n' % network
            else:
                raise OrchestratorError(
                    'Must specify a CIDR network, ceph addrvec, or plain IP: \'%s\'' % network)
        else:
            # try to get the public_network from the config
            ret, network, err = self.mgr.check_mon_command({
                'prefix': 'config get',
                'who': 'mon',
                'key': 'public_network',
            })
            network = network.strip() if network else network
            if not network:
                raise OrchestratorError(
                    'Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP')
            if '/' not in network:
                raise OrchestratorError(
                    'public_network is set but does not look like a CIDR network: \'%s\'' % network)
            extra_config += 'public network = %s\n' % network

        daemon_spec.ceph_conf = extra_config
        daemon_spec.keyring = keyring

        return daemon_spec

    def _check_safe_to_destroy(self, mon_id: str) -> None:
        """Raise OrchestratorError if removing mon_id would break quorum."""
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'quorum_status',
        })
        try:
            j = json.loads(out)
        except Exception as e:
            # chain the original parse error for debuggability
            raise OrchestratorError('failed to parse quorum status') from e

        mons = [m['name'] for m in j['monmap']['mons']]
        if mon_id not in mons:
            logger.info('Safe to remove mon.%s: not in monmap (%s)' % (
                mon_id, mons))
            return
        new_mons = [m for m in mons if m != mon_id]
        new_quorum = [m for m in j['quorum_names'] if m != mon_id]
        # a strict majority of the remaining mons must stay in quorum
        if len(new_quorum) > len(new_mons) / 2:
            logger.info('Safe to remove mon.%s: new quorum should be %s (from %s)' %
                        (mon_id, new_quorum, new_mons))
            return
        raise OrchestratorError(
            'Removing %s would break mon quorum (new quorum %s, new mons %s)' % (mon_id, new_quorum, new_mons))

    def pre_remove(self, daemon: DaemonDescription) -> None:
        """Verify quorum safety, then drop the mon from the monmap."""
        super().pre_remove(daemon)

        daemon_id: str = daemon.daemon_id
        self._check_safe_to_destroy(daemon_id)

        # remove mon from quorum before we destroy the daemon
        logger.info('Removing monitor %s from monmap...' % daemon_id)
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'mon rm',
            'name': daemon_id,
        })
406
407
class MgrService(CephService):
    TYPE = 'mgr'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """
        Create a new manager instance on a host.

        Fetches a mgr keyring and collects the TCP ports currently published
        by mgr modules so cephadm can open them on the new host.
        """
        assert self.TYPE == daemon_spec.daemon_type
        mgr_id, host = daemon_spec.daemon_id, daemon_spec.host

        # get mgr. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(mgr_id),
            'caps': ['mon', 'profile mgr',
                     'osd', 'allow *',
                     'mds', 'allow *'],
        })

        # Retrieve ports used by manager modules
        # In the case of the dashboard port and with several manager daemons
        # running in different hosts, it exists the possibility that the
        # user has decided to use different dashboard ports in each server
        # If this is the case then the dashboard port opened will be only the used
        # as default.
        ports = []
        ret, mgr_services, err = self.mgr.check_mon_command({
            'prefix': 'mgr services',
        })
        if mgr_services:
            mgr_endpoints = json.loads(mgr_services)
            for end_point in mgr_endpoints.values():
                # pull ':<port>/' out of each endpoint URL
                port = re.search(r'\:\d+\/', end_point)
                if port:
                    ports.append(int(port[0][1:-1]))

        if ports:
            daemon_spec.ports = ports

        daemon_spec.keyring = keyring

        return daemon_spec

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        """Return the description matching the currently active mgr (us)."""
        for daemon in daemon_descrs:
            if self.mgr.daemon_is_self(daemon.daemon_type, daemon.daemon_id):
                return daemon
        # if no active mgr found, return empty Daemon Desc
        return DaemonDescription()

    def fail_over(self) -> None:
        """Hand the active mgr role to a standby (raises if none exists)."""
        # BUG FIX: daemon names are '<type>.<id>'; the original concatenated
        # 'mgr' + id without the dot, producing e.g. 'mgrx'.
        mgr_daemon_name = f'mgr.{self.mgr.get_mgr_id()}'
        if not self.mgr_map_has_standby():
            raise OrchestratorError('Need standby mgr daemon', event_kind_subject=(
                'daemon', mgr_daemon_name))

        self.mgr.events.for_daemon(mgr_daemon_name,
                                   'INFO', 'Failing over to other MGR')
        logger.info('Failing over to other MGR')

        # fail over
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'mgr fail',
            'who': self.mgr.get_mgr_id(),
        })

    def mgr_map_has_standby(self) -> bool:
        """
        This is a bit safer than asking our inventory. If the mgr joined the mgr map,
        we know it joined the cluster
        """
        mgr_map = self.mgr.get('mgr_map')
        # default to [] so a missing 'standbys' key doesn't crash len(None)
        return bool(mgr_map.get('standbys', []))
482
483
class MdsService(CephService):
    TYPE = 'mds'

    def config(self, spec: ServiceSpec) -> None:
        """Pin these MDS daemons to their filesystem via mds_join_fs."""
        assert self.TYPE == spec.service_type
        assert spec.service_id

        # ensure mds_join_fs is set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': 'mds.' + spec.service_id,
            'name': 'mds_join_fs',
            'value': spec.service_id,
        })

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """Fetch a cephx keyring for the new MDS and attach it to the spec."""
        assert self.TYPE == daemon_spec.daemon_type
        mds_id, host = daemon_spec.daemon_id, daemon_spec.host

        # get mds. key (original comment said 'mgr. key' — copy-paste)
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(mds_id),
            'caps': ['mon', 'profile mds',
                     'osd', 'allow rw tag cephfs *=*',
                     'mds', 'allow'],
        })
        daemon_spec.keyring = keyring

        return daemon_spec

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        """Return the description of an up:active MDS, if one is known."""
        active_mds_strs: List[str] = []
        for fs in self.mgr.get('fs_map')['filesystems']:
            mds_map = fs['mdsmap']
            if mds_map is not None:
                for mds_id, mds_status in mds_map['info'].items():
                    if mds_status['state'] == 'up:active':
                        active_mds_strs.append(mds_status['name'])
        if active_mds_strs:
            for daemon in daemon_descrs:
                if daemon.daemon_id in active_mds_strs:
                    return daemon
        # if no mds found, return empty Daemon Desc
        return DaemonDescription()
529
530
class RgwService(CephService):
    TYPE = 'rgw'

    def config(self, spec: RGWSpec, rgw_id: str) -> None:
        """Apply cluster-side configuration for an RGW service.

        Bootstraps the realm/zonegroup/zone if needed, sets rgw_zone,
        rgw_realm and rgw_frontends on the service's config section, stores
        the optional SSL cert/key, and persists the spec.
        """
        assert self.TYPE == spec.service_type

        # create realm, zonegroup, and zone if needed
        self.create_realm_zonegroup_zone(spec, rgw_id)

        # ensure rgw_realm and rgw_zone is set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_zone',
            'value': spec.rgw_zone,
        })
        # BUG FIX: the original targeted section '...<rgw_realm>' here while
        # the zone/frontends settings target '...<service_id>'; daemons read
        # the service_id section, so rgw_realm was set on a section nothing
        # reads.
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_realm',
            'value': spec.rgw_realm,
        })
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_frontends',
            'value': spec.rgw_frontends_config_value(),
        })

        if spec.rgw_frontend_ssl_certificate:
            if isinstance(spec.rgw_frontend_ssl_certificate, list):
                cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
            elif isinstance(spec.rgw_frontend_ssl_certificate, str):
                cert_data = spec.rgw_frontend_ssl_certificate
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_certificate: %s'
                    % spec.rgw_frontend_ssl_certificate)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.crt',
                'val': cert_data,
            })

        if spec.rgw_frontend_ssl_key:
            if isinstance(spec.rgw_frontend_ssl_key, list):
                key_data = '\n'.join(spec.rgw_frontend_ssl_key)
            # BUG FIX: the original checked rgw_frontend_ssl_certificate here
            # (copy-paste), so a str key paired with a non-str cert took the
            # wrong branch or raised spuriously.
            elif isinstance(spec.rgw_frontend_ssl_key, str):
                key_data = spec.rgw_frontend_ssl_key
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_key: %s'
                    % spec.rgw_frontend_ssl_key)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.key',
                'val': key_data,
            })

        logger.info('Saving service %s spec with placement %s' % (
            spec.service_name(), spec.placement.pretty_str()))
        self.mgr.spec_store.save(spec)

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """Attach the RGW cephx keyring to the daemon spec."""
        assert self.TYPE == daemon_spec.daemon_type
        rgw_id, host = daemon_spec.daemon_id, daemon_spec.host

        keyring = self.get_keyring(rgw_id)

        daemon_spec.keyring = keyring

        return daemon_spec

    def get_keyring(self, rgw_id: str) -> str:
        """Get-or-create the cephx keyring for the given RGW daemon id."""
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(rgw_id),
            'caps': ['mon', 'allow *',
                     'mgr', 'allow rw',
                     'osd', 'allow rwx tag rgw *=*'],
        })
        return keyring

    def create_realm_zonegroup_zone(self, spec: RGWSpec, rgw_id: str) -> None:
        """Create the realm/zonegroup/zone for *spec* if they are missing,
        committing a period update when anything was created."""
        if utils.get_cluster_health(self.mgr) != 'HEALTH_OK':
            raise OrchestratorError('Health not ok, will try again when health ok')

        # get keyring needed to run rados commands and strip out just the keyring
        keyring = self.get_keyring(rgw_id).split('key = ', 1)[1].rstrip()

        # We can call radosgw-admin within the container, cause cephadm gives
        # the MGR the required keyring permissions

        def radosgw_admin(args: List[str]) -> bytes:
            # single place to assemble the credentialed invocation; the
            # original repeated these three lines in every helper
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id] + args
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            return result.stdout

        def list_entities(entity: str, json_key: str) -> List[str]:
            # list realms/zonegroups/zones as JSON; empty output means none
            out = radosgw_admin([entity, 'list', '--format=json'])
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get(json_key, [])
            except Exception as e:
                # chain the parse error for debuggability
                raise OrchestratorError('failed to parse %s info' % entity) from e

        def create_realm() -> None:
            radosgw_admin(['realm', 'create',
                           '--rgw-realm=%s' % spec.rgw_realm,
                           '--default'])
            self.mgr.log.info('created realm: %s' % spec.rgw_realm)

        def create_zonegroup() -> None:
            radosgw_admin(['zonegroup', 'create',
                           '--rgw-zonegroup=default',
                           '--master', '--default'])
            self.mgr.log.info('created zonegroup: default')

        def create_zonegroup_if_required() -> None:
            zonegroups = list_entities('zonegroup', 'zonegroups')
            if 'default' not in zonegroups:
                create_zonegroup()

        def create_zone() -> None:
            radosgw_admin(['zone', 'create',
                           '--rgw-zonegroup=default',
                           '--rgw-zone=%s' % spec.rgw_zone,
                           '--master', '--default'])
            self.mgr.log.info('created zone: %s' % spec.rgw_zone)

        changes = False
        realms = list_entities('realm', 'realms')
        if spec.rgw_realm not in realms:
            create_realm()
            changes = True

        zones = list_entities('zone', 'zones')
        if spec.rgw_zone not in zones:
            create_zonegroup_if_required()
            create_zone()
            changes = True

        # update period if changes were made
        if changes:
            radosgw_admin(['period', 'update',
                           '--rgw-realm=%s' % spec.rgw_realm,
                           '--commit'])
            self.mgr.log.info('updated period')
729
730
class RbdMirrorService(CephService):
    TYPE = 'rbd-mirror'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """Fetch (or create) the rbd-mirror cephx keyring and attach it."""
        assert self.TYPE == daemon_spec.daemon_type
        daemon_id = daemon_spec.daemon_id

        # rbd-mirror needs only its mon profile plus the rbd profile on OSDs
        _, keyring, _ = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(daemon_id),
            'caps': ['mon', 'profile rbd-mirror',
                     'osd', 'profile rbd'],
        })

        daemon_spec.keyring = keyring
        return daemon_spec
748
749
class CrashService(CephService):
    TYPE = 'crash'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """Fetch (or create) the host-scoped crash keyring and attach it."""
        assert self.TYPE == daemon_spec.daemon_type
        daemon_id = daemon_spec.daemon_id
        host = daemon_spec.host

        # crash keyrings are per host: entity is client.crash.<host>
        entity = self.get_auth_entity(daemon_id, host=host)
        _, keyring, _ = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': entity,
            'caps': ['mon', 'profile crash',
                     'mgr', 'profile crash'],
        })

        daemon_spec.keyring = keyring
        return daemon_spec