5 from abc
import ABCMeta
, abstractmethod
6 from typing
import TYPE_CHECKING
, List
, Callable
, Any
, TypeVar
, Generic
, \
7 Optional
, Dict
, Any
, Tuple
, NewType
9 from mgr_module
import HandleCommandResult
, MonCommandFailed
11 from ceph
.deployment
.service_spec
import ServiceSpec
, RGWSpec
12 from ceph
.deployment
.utils
import is_ipv6
, unwrap_ipv6
13 from orchestrator
import OrchestratorError
, DaemonDescription
14 from cephadm
import utils
17 from cephadm
.module
import CephadmOrchestrator
# Module-level logger (standard `logging.getLogger(__name__)` pattern).
19 logger
= logging
.getLogger(__name__
)
# Type variable bounded by ServiceSpec so CephadmDaemonSpec can be generic
# over the concrete spec subclass it carries.
21 ServiceSpecs
= TypeVar('ServiceSpecs', bound
=ServiceSpec
)
# Distinct string type for cephx auth entity names
# (e.g. 'client.crash.<host>', 'mon.' — see CephService.get_auth_entity).
22 AuthEntity
= NewType('AuthEntity', str)
# Value object describing one daemon to be deployed (or redeployed) by
# cephadm; generic over the ServiceSpec subclass it carries.
# NOTE(review): this is a fragmentary extraction — several original source
# lines are missing between the numbered fragments below.
25 class CephadmDaemonSpec(Generic
[ServiceSpecs
]):
26 # typing.NamedTuple + Generic is broken in py36
27 def __init__(self
, host
: str, daemon_id
: str,
28 spec
: Optional
[ServiceSpecs
] = None,
29 network
: Optional
[str] = None,
30 keyring
: Optional
[str] = None,
31 extra_args
: Optional
[List
[str]] = None,
# NOTE(review): original line 32 is missing here — `self.ceph_conf` is
# assigned below (orig line 61) from a name not in the visible signature,
# so a `ceph_conf` parameter presumably lives in the gap; confirm.
33 extra_files
: Optional
[Dict
[str, Any
]] = None,
34 daemon_type
: Optional
[str] = None,
35 ports
: Optional
[List
[int]] = None,):
# Docstring fragment (delimiters fall in missing lines):
38 * deploying new daemons. then everything is set
39 * redeploying existing daemons, then only the first three attrs are set.
41 Would be great to have a consistent usage where all properties are set.
# NOTE(review): an assignment of the `host` parameter (other code reads
# daemon_spec.host) presumably sits in the missing lines — confirm.
44 self
.daemon_id
= daemon_id
# Fall back to the spec's service_type when no explicit daemon_type given.
45 daemon_type
= daemon_type
or (spec
.service_type
if spec
else None)
46 assert daemon_type
is not None
47 self
.daemon_type
: str = daemon_type
49 # would be great to have the spec always available:
50 self
.spec
: Optional
[ServiceSpecs
] = spec
53 self
.network
= network
56 self
.keyring
: Optional
[str] = keyring
58 # For run_cephadm. Would be great to have more expressive names.
59 self
.extra_args
: List
[str] = extra_args
or []
61 self
.ceph_conf
= ceph_conf
62 self
.extra_files
= extra_files
or {}
64 # TCP ports used by the daemon
65 self
.ports
: List
[int] = ports
or []
def name(self) -> str:
    """Return the daemon's full name in '<daemon_type>.<daemon_id>' form."""
    daemon_type, daemon_id = self.daemon_type, self.daemon_id
    return '%s.%s' % (daemon_type, daemon_id)
# Return the extra-files mapping for cephadm, with the generated ceph.conf
# injected under the 'config' key.
# NOTE(review): extraction fragment — the guard around the 'config'
# assignment (orig line 72) and the return statement are in missing lines;
# confirm against the full file. Also note `files` aliases (and mutates)
# self.extra_files rather than copying it.
70 def config_get_files(self
) -> Dict
[str, Any
]:
71 files
= self
.extra_files
73 files
['config'] = self
.ceph_conf
# Abstract base class for all cephadm service types.
78 class CephadmService(metaclass
=ABCMeta
):
# Docstring fragment (delimiters fall in missing lines):
80 Base class for service types. Often providing a create() and config() fn.
# NOTE(review): TYPE is presumably an abstract property identifying the
# service type string — its decorators (orig 83-84) and body (orig 86-87)
# fall in missing lines; confirm.
85 def TYPE(self
) -> str:
def __init__(self, mgr: "CephadmOrchestrator"):
    """Keep a reference to the orchestrator module this service acts through."""
    self.mgr: "CephadmOrchestrator" = mgr
# Factory for a CephadmDaemonSpec bound to this service.
# NOTE(review): extraction fragment — parameters at orig lines 92-93 and
# the constructor arguments (orig 96+) are in missing lines.
91 def make_daemon_spec(self
, host
: str,
94 spec
: ServiceSpecs
) -> CephadmDaemonSpec
:
95 return CephadmDaemonSpec(
def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
    """Prepare *daemon_spec* for deployment; concrete services must override."""
    raise NotImplementedError()
def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
    """Produce the cephadm config payload and dependency list for a daemon;
    concrete services must override."""
    raise NotImplementedError()
# Post-check hook: push dashboard configuration once daemons were checked,
# but only when the dashboard mgr module is enabled.
# NOTE(review): extraction fragment — orig line 113 (presumably the `else:`
# matching the inner `if` on line 111, pairing with the debug log below)
# is missing; confirm.
108 def daemon_check_post(self
, daemon_descrs
: List
[DaemonDescription
]) -> None:
109 """The post actions needed to be done after daemons are checked"""
110 if self
.mgr
.config_dashboard
:
111 if 'dashboard' in self
.mgr
.get('mgr_map')['modules']:
112 self
.config_dashboard(daemon_descrs
)
114 logger
.debug('Dashboard is not enabled. Skip configuration.')
def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None:
    """Apply dashboard settings for the given daemons; services that
    integrate with the dashboard override this."""
    raise NotImplementedError()
def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
    """Return the "active" daemon for this service.

    Service types that actually track an active instance override this;
    the default (no explicit definition) is an empty DaemonDescription.
    """
    return DaemonDescription()
def _inventory_get_addr(self, hostname: str) -> str:
    """Resolve *hostname* to its address via the orchestrator inventory."""
    inventory = self.mgr.inventory
    return inventory.get_addr(hostname)
# Convenience wrapper around _check_and_set_dashboard for the common
# "single service_url setting" case.
# NOTE(review): extraction fragment — parameters at orig 130-132 (which,
# from the calls below, include service_name and set_mon_cmd), parts of the
# docstring, and parts of the nested dict literal are in missing lines.
129 def _set_service_url_on_dashboard(self
,
133 service_url
: str) -> None:
134 """A helper to get and set service_url via Dashboard's MON command.
136 If result of get_mon_cmd differs from service_url, set_mon_cmd will
137 be sent to set the service_url.
# Nested helper: build the set-command list only when the current value
# differs from the desired service_url (avoids needless config churn).
139 def get_set_cmd_dicts(out
: str) -> List
[dict]:
141 'prefix': set_mon_cmd
,
144 return [cmd_dict
] if service_url
!= out
else []
146 self
._check
_and
_set
_dashboard
(
147 service_name
=service_name
,
149 get_set_cmd_dicts
=get_set_cmd_dicts
# Generic "read dashboard config, compare, conditionally write" helper.
# NOTE(review): extraction fragment — parameters at orig 153-154, parts of
# the docstring, the get-command dict body (orig 192-193), and the
# try/else scaffolding around both check_mon_command calls are in missing
# lines; the bare `except MonCommandFailed` lines below imply those trys.
152 def _check_and_set_dashboard(self
,
155 get_set_cmd_dicts
: Callable
[[str], List
[dict]]) -> None:
156 """A helper to set configs in the Dashboard.
158 The method is useful for the pattern:
159 - Getting a config from Dashboard by using a Dashboard command. e.g. current iSCSI
161 - Parse or deserialize previous output. e.g. Dashboard command returns a JSON string.
162 - Determine if the config need to be update. NOTE: This step is important because if a
163 Dashboard command modified Ceph config, cephadm's config_notify() is called. Which
164 kicks the serve() loop and the logic using this method is likely to be called again.
165 A config should be updated only when needed.
166 - Update a config in Dashboard by using a Dashboard command.
168 :param service_name: the service name to be used for logging
169 :type service_name: str
170 :param get_cmd: Dashboard command prefix to get config. e.g. dashboard get-grafana-api-url
172 :param get_set_cmd_dicts: function to create a list, and each item is a command dictionary.
176 'prefix': 'dashboard iscsi-gateway-add',
177 'service_url': 'http://admin:admin@aaa:5000',
181 'prefix': 'dashboard iscsi-gateway-add',
182 'service_url': 'http://admin:admin@bbb:5000',
186 The function should return empty list if no command need to be sent.
187 :type get_set_cmd_dicts: Callable[[str], List[dict]]
# Read the current value; a failure is only logged, not fatal.
191 _
, out
, _
= self
.mgr
.check_mon_command({
194 except MonCommandFailed
as e
:
195 logger
.warning('Failed to get Dashboard config for %s: %s', service_name
, e
)
# Compute and send only the commands that actually change something.
197 cmd_dicts
= get_set_cmd_dicts(out
.strip())
# Iterate a copy since cmd_dict is mutated (inbuf popped) below.
198 for cmd_dict
in list(cmd_dicts
):
200 inbuf
= cmd_dict
.pop('inbuf', None)
201 _
, out
, _
= self
.mgr
.check_mon_command(cmd_dict
, inbuf
)
202 except MonCommandFailed
as e
:
203 logger
.warning('Failed to set Dashboard config for %s: %s', service_name
, e
)
# Ask the cluster whether stopping the given daemons is safe. Non
# quorum/data-critical types (anything but mon/osd/mds) are always OK;
# otherwise defer to the mon's '<type> ok-to-stop' command.
# NOTE(review): extraction fragment — the mon_command dict body (orig
# 216-219) and the branch scaffolding around the success/failure returns
# (orig 221, 223, 225) are in missing lines.
205 def ok_to_stop(self
, daemon_ids
: List
[str]) -> HandleCommandResult
:
206 names
= [f
'{self.TYPE}.{d_id}' for d_id
in daemon_ids
]
207 out
= f
'It is presumed safe to stop {names}'
208 err
= f
'It is NOT safe to stop {names}'
210 if self
.TYPE
not in ['mon', 'osd', 'mds']:
212 return HandleCommandResult(0, out
, None)
214 r
= HandleCommandResult(*self
.mgr
.mon_command({
215 'prefix': f
'{self.TYPE} ok-to-stop',
# Failure path: surface the mon's stderr with our summary prefixed.
220 err
= f
'{err}: {r.stderr}' if r
.stderr
else err
222 return HandleCommandResult(r
.retval
, r
.stdout
, err
)
# Success path: likewise append the mon's stdout detail.
224 out
= f
'{out}: {r.stdout}' if r
.stdout
else out
226 return HandleCommandResult(r
.retval
, out
, r
.stderr
)
def pre_remove(self, daemon: DaemonDescription) -> None:
    """Hook invoked just before *daemon* is removed from the cluster."""
    assert daemon.daemon_type == self.TYPE
    logger.debug(f'Pre remove daemon {self.TYPE}.{daemon.daemon_id}')
def post_remove(self, daemon: DaemonDescription) -> None:
    """Hook invoked just after *daemon* has been removed from the cluster."""
    assert daemon.daemon_type == self.TYPE
    logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')
class CephService(CephadmService):
    """Base for native Ceph daemon services (mon, mgr, mds, osd, ...) that
    share config/keyring generation and keyring cleanup."""

    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        """Build the cephadm config payload for a Ceph daemon.

        Combines the minimal ceph.conf plus the daemon's cephx keyring, and
        attaches any extra files the spec carries. Returns (config, deps).
        """
        cephadm_config = self.get_config_and_keyring(
            daemon_spec.daemon_type,
            daemon_spec.daemon_id,
            host=daemon_spec.host,
            keyring=daemon_spec.keyring,
            extra_ceph_config=daemon_spec.ceph_conf)

        # Attach extra files (including the generated ceph.conf) if any.
        if daemon_spec.config_get_files():
            cephadm_config.update({'files': daemon_spec.config_get_files()})

        return cephadm_config, []
def post_remove(self, daemon: DaemonDescription) -> None:
    """After the base-class post-remove handling, drop the daemon's cephx
    keyring so no stale credentials remain."""
    super().post_remove(daemon)
    self.remove_keyring(daemon)
def get_auth_entity(self, daemon_id: str, host: str = "") -> AuthEntity:
    """
    Map the daemon id to a cephx keyring entity name.

    :param daemon_id: the daemon's id
    :param host: hostname; only required for the 'crash' type, whose
                 entity name is keyed by host rather than daemon id
    :raises OrchestratorError: if the service type is unknown, or if the
                               type is 'crash' and no host was given
    """
    if self.TYPE in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
        return AuthEntity(f'client.{self.TYPE}.{daemon_id}')
    elif self.TYPE == 'crash':
        # Fix: the raise was unconditional in the original, making the
        # following return unreachable — it must only fire when the
        # per-host entity name cannot be built because host is missing.
        if not host:
            raise OrchestratorError("Host not provided to generate <crash> auth entity name")
        return AuthEntity(f'client.{self.TYPE}.{host}')
    elif self.TYPE == 'mon':
        return AuthEntity('mon.')
    elif self.TYPE in ['mgr', 'osd', 'mds']:
        return AuthEntity(f'{self.TYPE}.{daemon_id}')
    raise OrchestratorError("unknown daemon type")
# Fetch (or reuse) the daemon's cephx keyring and assemble the minimal
# ceph.conf (plus any extra config) for deployment.
# NOTE(review): extraction fragment — parameters at orig 280-282 (from the
# call below: daemon_type, daemon_id, host), the return annotation/colon
# (orig 285), parts of the auth-get dict, and the return statement are in
# missing lines.
279 def get_config_and_keyring(self
,
283 keyring
: Optional
[str] = None,
284 extra_ceph_config
: Optional
[str] = None
# Resolve the cephx entity for this daemon, then fetch its keyring.
288 entity
: AuthEntity
= self
.get_auth_entity(daemon_id
, host
=host
)
289 ret
, keyring
, err
= self
.mgr
.check_mon_command({
290 'prefix': 'auth get',
294 config
= self
.mgr
.get_minimal_ceph_conf()
296 if extra_ceph_config
:
297 config
+= extra_ceph_config
# Delete the daemon's cephx keyring from the cluster (skipping the shared
# mon keyring).
# NOTE(review): extraction fragment — the early-return for the mon case
# (after orig 309) and the 'auth rm' command dict body (orig 316+) are in
# missing lines.
304 def remove_keyring(self
, daemon
: DaemonDescription
) -> None:
305 daemon_id
: str = daemon
.daemon_id
306 host
: str = daemon
.hostname
308 if daemon_id
== 'mon':
309 # do not remove the mon keyring
312 entity
= self
.get_auth_entity(daemon_id
, host
=host
)
314 logger
.info(f
'Remove keyring: {entity}')
315 ret
, out
, err
= self
.mgr
.check_mon_command({
# Service handler for ceph-mon daemons: creation (with public network
# inference), quorum-safety checks, and monmap removal on teardown.
# NOTE(review): fragmentary extraction — the TYPE attribute (orig 322) and
# many body lines are missing between the numbered fragments below.
321 class MonService(CephService
):
# prepare_create: fetch the mon keyring and synthesize a [mon.<name>]
# config snippet describing the daemon's public network/addr.
324 def prepare_create(self
, daemon_spec
: CephadmDaemonSpec
) -> CephadmDaemonSpec
:
326 Create a new monitor on the given host.
328 assert self
.TYPE
== daemon_spec
.daemon_type
329 name
, host
, network
= daemon_spec
.daemon_id
, daemon_spec
.host
, daemon_spec
.network
332 ret
, keyring
, err
= self
.mgr
.check_mon_command({
333 'prefix': 'auth get',
334 'entity': self
.get_auth_entity(name
),
337 extra_config
= '[mon.%s]\n' % name
339 # infer whether this is a CIDR network, addrvec, or plain IP
# NOTE(review): the leading `if` of this chain (orig 340, presumably the
# CIDR '/'-in-network test) is in a missing line; confirm.
341 extra_config
+= 'public network = %s\n' % network
342 elif network
.startswith('[v') and network
.endswith(']'):
343 extra_config
+= 'public addrv = %s\n' % network
344 elif is_ipv6(network
):
345 extra_config
+= 'public addr = %s\n' % unwrap_ipv6(network
)
346 elif ':' not in network
:
347 extra_config
+= 'public addr = %s\n' % network
# Unrecognized network string: refuse rather than guess.
349 raise OrchestratorError(
350 'Must specify a CIDR network, ceph addrvec, or plain IP: \'%s\'' % network
)
# No explicit network on the spec: fall back to the cluster-wide
# public_network config option.
352 # try to get the public_network from the config
353 ret
, network
, err
= self
.mgr
.check_mon_command({
354 'prefix': 'config get',
356 'key': 'public_network',
358 network
= network
.strip() if network
else network
360 raise OrchestratorError(
361 'Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP')
362 if '/' not in network
:
363 raise OrchestratorError(
364 'public_network is set but does not look like a CIDR network: \'%s\'' % network
)
365 extra_config
+= 'public network = %s\n' % network
367 daemon_spec
.ceph_conf
= extra_config
368 daemon_spec
.keyring
= keyring
# _check_safe_to_destroy: simulate removing mon_id from the quorum and
# raise if the remaining mons could not keep a majority.
# NOTE(review): `except Exception as e` discards `e` and the new error is
# not chained (`raise ... from e`) — consider chaining; the `try` and the
# json parse (orig 375-377) are in missing lines.
372 def _check_safe_to_destroy(self
, mon_id
: str) -> None:
373 ret
, out
, err
= self
.mgr
.check_mon_command({
374 'prefix': 'quorum_status',
378 except Exception as e
:
379 raise OrchestratorError('failed to parse quorum status')
381 mons
= [m
['name'] for m
in j
['monmap']['mons']]
382 if mon_id
not in mons
:
383 logger
.info('Safe to remove mon.%s: not in monmap (%s)' % (
386 new_mons
= [m
for m
in mons
if m
!= mon_id
]
387 new_quorum
= [m
for m
in j
['quorum_names'] if m
!= mon_id
]
# Majority check: the would-be quorum must exceed half the new mon count.
388 if len(new_quorum
) > len(new_mons
) / 2:
389 logger
.info('Safe to remove mon.%s: new quorum should be %s (from %s)' %
390 (mon_id
, new_quorum
, new_mons
))
392 raise OrchestratorError(
393 'Removing %s would break mon quorum (new quorum %s, new mons %s)' % (mon_id
, new_quorum
, new_mons
))
# pre_remove: verify quorum safety, then remove the mon from the monmap
# before the daemon itself is destroyed.
395 def pre_remove(self
, daemon
: DaemonDescription
) -> None:
396 super().pre_remove(daemon
)
398 daemon_id
: str = daemon
.daemon_id
399 self
._check
_safe
_to
_destroy
(daemon_id
)
401 # remove mon from quorum before we destroy the daemon
402 logger
.info('Removing monitor %s from monmap...' % daemon_id
)
403 ret
, out
, err
= self
.mgr
.check_mon_command({
# Service handler for ceph-mgr daemons: creation (including discovery of
# module ports), active-daemon lookup, and failover.
# NOTE(review): fragmentary extraction — the TYPE attribute and many body
# lines are missing between the numbered fragments below.
409 class MgrService(CephService
):
# prepare_create: create the mgr's cephx key and collect the TCP ports
# used by mgr modules so cephadm can open them.
412 def prepare_create(self
, daemon_spec
: CephadmDaemonSpec
) -> CephadmDaemonSpec
:
414 Create a new manager instance on a host.
416 assert self
.TYPE
== daemon_spec
.daemon_type
417 mgr_id
, host
= daemon_spec
.daemon_id
, daemon_spec
.host
420 ret
, keyring
, err
= self
.mgr
.check_mon_command({
421 'prefix': 'auth get-or-create',
422 'entity': self
.get_auth_entity(mgr_id
),
423 'caps': ['mon', 'profile mgr',
428 # Retrieve ports used by manager modules
429 # In the case of the dashboard port and with several manager daemons
430 # running in different hosts, it exists the possibility that the
431 # user has decided to use different dashboard ports in each server
432 # If this is the case then the dashboard port opened will be only the used
436 ret
, mgr_services
, err
= self
.mgr
.check_mon_command({
437 'prefix': 'mgr services',
# Parse module endpoint URLs and extract their ':<port>/' fragments.
# NOTE(review): the initialization of `ports` (presumably an empty list)
# and the `if port:` guard fall in missing lines (orig 438-439, 443).
440 mgr_endpoints
= json
.loads(mgr_services
)
441 for end_point
in mgr_endpoints
.values():
442 port
= re
.search(r
'\:\d+\/', end_point
)
444 ports
.append(int(port
[0][1:-1]))
447 daemon_spec
.ports
= ports
449 daemon_spec
.keyring
= keyring
# get_active_daemon: the active mgr is the one this module runs in.
# NOTE(review): the `return daemon` for the match (orig 456) is in a
# missing line.
453 def get_active_daemon(self
, daemon_descrs
: List
[DaemonDescription
]) -> DaemonDescription
:
454 for daemon
in daemon_descrs
:
455 if self
.mgr
.daemon_is_self(daemon
.daemon_type
, daemon
.daemon_id
):
457 # if no active mgr found, return empty Daemon Desc
458 return DaemonDescription()
# fail_over: hand the active role to a standby mgr by failing ourselves;
# requires a standby to exist.
460 def fail_over(self
) -> None:
461 if not self
.mgr_map_has_standby():
462 raise OrchestratorError('Need standby mgr daemon', event_kind_subject
=(
463 'daemon', 'mgr' + self
.mgr
.get_mgr_id()))
465 self
.mgr
.events
.for_daemon('mgr' + self
.mgr
.get_mgr_id(),
466 'INFO', 'Failing over to other MGR')
467 logger
.info('Failing over to other MGR')
470 ret
, out
, err
= self
.mgr
.check_mon_command({
471 'prefix': 'mgr fail',
472 'who': self
.mgr
.get_mgr_id(),
# mgr_map_has_standby: consult the mgr map (not our inventory) for
# standbys, since presence in the map proves cluster membership.
# NOTE(review): the return statement (orig 482+) is in a missing line.
475 def mgr_map_has_standby(self
) -> bool:
477 This is a bit safer than asking our inventory. If the mgr joined the mgr map,
478 we know it joined the cluster
480 mgr_map
= self
.mgr
.get('mgr_map')
481 num
= len(mgr_map
.get('standbys'))
# Service handler for ceph-mds daemons: pins daemons to their filesystem
# via mds_join_fs, creates cephx keys, and finds the active MDS.
# NOTE(review): fragmentary extraction — the TYPE attribute and several
# body lines are missing between the numbered fragments below.
485 class MdsService(CephService
):
# config: for service mds.<fs>, set mds_join_fs=<fs> so its daemons
# prefer joining that filesystem.
488 def config(self
, spec
: ServiceSpec
) -> None:
489 assert self
.TYPE
== spec
.service_type
490 assert spec
.service_id
492 # ensure mds_join_fs is set for these daemons
493 ret
, out
, err
= self
.mgr
.check_mon_command({
494 'prefix': 'config set',
495 'who': 'mds.' + spec
.service_id
,
496 'name': 'mds_join_fs',
497 'value': spec
.service_id
,
# prepare_create: mint the mds cephx key and attach it to the spec.
# NOTE(review): the caps continuation (orig 510-511) and the return
# (orig 513-514) are in missing lines.
500 def prepare_create(self
, daemon_spec
: CephadmDaemonSpec
) -> CephadmDaemonSpec
:
501 assert self
.TYPE
== daemon_spec
.daemon_type
502 mds_id
, host
= daemon_spec
.daemon_id
, daemon_spec
.host
505 ret
, keyring
, err
= self
.mgr
.check_mon_command({
506 'prefix': 'auth get-or-create',
507 'entity': self
.get_auth_entity(mds_id
),
508 'caps': ['mon', 'profile mds',
509 'osd', 'allow rw tag cephfs *=*',
512 daemon_spec
.keyring
= keyring
# get_active_daemon: scan the fs_map for 'up:active' MDS names and match
# them against the known daemons.
# NOTE(review): the `return daemon` for a match (orig 527) is in a
# missing line.
516 def get_active_daemon(self
, daemon_descrs
: List
[DaemonDescription
]) -> DaemonDescription
:
517 active_mds_strs
= list()
518 for fs
in self
.mgr
.get('fs_map')['filesystems']:
519 mds_map
= fs
['mdsmap']
520 if mds_map
is not None:
521 for mds_id
, mds_status
in mds_map
['info'].items():
522 if mds_status
['state'] == 'up:active':
523 active_mds_strs
.append(mds_status
['name'])
524 if len(active_mds_strs
) != 0:
525 for daemon
in daemon_descrs
:
526 if daemon
.daemon_id
in active_mds_strs
:
528 # if no mds found, return empty Daemon Desc
529 return DaemonDescription()
# Service handler for radosgw daemons: per-service config (realm/zone,
# frontends, SSL material), keyring creation, and bootstrap of the
# realm/zonegroup/zone via radosgw-admin.
# NOTE(review): fragmentary extraction — the TYPE attribute and many body
# lines are missing between the numbered fragments below.
532 class RgwService(CephService
):
# config: push rgw_zone/rgw_realm/rgw_frontends settings and SSL
# cert/key material, then persist the spec.
535 def config(self
, spec
: RGWSpec
, rgw_id
: str) -> None:
536 assert self
.TYPE
== spec
.service_type
538 # create realm, zonegroup, and zone if needed
539 self
.create_realm_zonegroup_zone(spec
, rgw_id
)
541 # ensure rgw_realm and rgw_zone is set for these daemons
542 ret
, out
, err
= self
.mgr
.check_mon_command({
543 'prefix': 'config set',
544 'who': f
"{utils.name_to_config_section('rgw')}.{spec.service_id}",
546 'value': spec
.rgw_zone
,
548 ret
, out
, err
= self
.mgr
.check_mon_command({
549 'prefix': 'config set',
550 'who': f
"{utils.name_to_config_section('rgw')}.{spec.rgw_realm}",
552 'value': spec
.rgw_realm
,
554 ret
, out
, err
= self
.mgr
.check_mon_command({
555 'prefix': 'config set',
556 'who': f
"{utils.name_to_config_section('rgw')}.{spec.service_id}",
557 'name': 'rgw_frontends',
558 'value': spec
.rgw_frontends_config_value(),
# SSL certificate: accept a list of PEM chunks or a single string.
561 if spec
.rgw_frontend_ssl_certificate
:
562 if isinstance(spec
.rgw_frontend_ssl_certificate
, list):
563 cert_data
= '\n'.join(spec
.rgw_frontend_ssl_certificate
)
564 elif isinstance(spec
.rgw_frontend_ssl_certificate
, str):
565 cert_data
= spec
.rgw_frontend_ssl_certificate
567 raise OrchestratorError(
568 'Invalid rgw_frontend_ssl_certificate: %s'
569 % spec
.rgw_frontend_ssl_certificate
)
570 ret
, out
, err
= self
.mgr
.check_mon_command({
571 'prefix': 'config-key set',
572 'key': f
'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.crt',
# SSL key: same shape handling as the certificate above.
576 if spec
.rgw_frontend_ssl_key
:
577 if isinstance(spec
.rgw_frontend_ssl_key
, list):
578 key_data
= '\n'.join(spec
.rgw_frontend_ssl_key
)
# NOTE(review): this branch tests rgw_frontend_ssl_certificate but
# assigns rgw_frontend_ssl_key — looks like a copy-paste bug; it should
# almost certainly test isinstance(spec.rgw_frontend_ssl_key, str).
579 elif isinstance(spec
.rgw_frontend_ssl_certificate
, str):
580 key_data
= spec
.rgw_frontend_ssl_key
582 raise OrchestratorError(
583 'Invalid rgw_frontend_ssl_key: %s'
584 % spec
.rgw_frontend_ssl_key
)
585 ret
, out
, err
= self
.mgr
.check_mon_command({
586 'prefix': 'config-key set',
587 'key': f
'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.key',
591 logger
.info('Saving service %s spec with placement %s' % (
592 spec
.service_name(), spec
.placement
.pretty_str()))
593 self
.mgr
.spec_store
.save(spec
)
# prepare_create: attach the rgw cephx keyring to the daemon spec.
595 def prepare_create(self
, daemon_spec
: CephadmDaemonSpec
) -> CephadmDaemonSpec
:
596 assert self
.TYPE
== daemon_spec
.daemon_type
597 rgw_id
, host
= daemon_spec
.daemon_id
, daemon_spec
.host
599 keyring
= self
.get_keyring(rgw_id
)
601 daemon_spec
.keyring
= keyring
# get_keyring: create (or fetch) the rgw daemon's cephx key.
# NOTE(review): the caps continuation (orig 610) and the return
# (orig 612-613) are in missing lines.
605 def get_keyring(self
, rgw_id
: str) -> str:
606 ret
, keyring
, err
= self
.mgr
.check_mon_command({
607 'prefix': 'auth get-or-create',
608 'entity': self
.get_auth_entity(rgw_id
),
609 'caps': ['mon', 'allow *',
611 'osd', 'allow rwx tag rgw *=*'],
# create_realm_zonegroup_zone: idempotently bootstrap realm, default
# zonegroup, and zone by shelling out to radosgw-admin, then commit the
# period. Requires HEALTH_OK so the commands do not hang/fail midway.
# NOTE(review): json.loads of each command's stdout, several command-verb
# list items, and the create_* call sites fall in missing lines; the
# `except Exception as e` handlers below discard `e` without chaining.
615 def create_realm_zonegroup_zone(self
, spec
: RGWSpec
, rgw_id
: str) -> None:
616 if utils
.get_cluster_health(self
.mgr
) != 'HEALTH_OK':
617 raise OrchestratorError('Health not ok, will try again when health ok')
619 # get keyring needed to run rados commands and strip out just the keyring
620 keyring
= self
.get_keyring(rgw_id
).split('key = ', 1)[1].rstrip()
622 # We can call radosgw-admin within the container, cause cephadm gives the MGR the required keyring permissions
# Helper: list existing realm names.
624 def get_realms() -> List
[str]:
625 cmd
= ['radosgw-admin',
626 '--key=%s' % keyring
,
627 '--user', 'rgw.%s' % rgw_id
,
630 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
636 return j
.get('realms', [])
637 except Exception as e
:
638 raise OrchestratorError('failed to parse realm info')
# Helper: create the realm named by the spec.
640 def create_realm() -> None:
641 cmd
= ['radosgw-admin',
642 '--key=%s' % keyring
,
643 '--user', 'rgw.%s' % rgw_id
,
645 '--rgw-realm=%s' % spec
.rgw_realm
,
647 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
648 self
.mgr
.log
.info('created realm: %s' % spec
.rgw_realm
)
# Helper: list existing zonegroup names.
650 def get_zonegroups() -> List
[str]:
651 cmd
= ['radosgw-admin',
652 '--key=%s' % keyring
,
653 '--user', 'rgw.%s' % rgw_id
,
656 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
662 return j
.get('zonegroups', [])
663 except Exception as e
:
664 raise OrchestratorError('failed to parse zonegroup info')
# Helper: create the master 'default' zonegroup.
666 def create_zonegroup() -> None:
667 cmd
= ['radosgw-admin',
668 '--key=%s' % keyring
,
669 '--user', 'rgw.%s' % rgw_id
,
670 'zonegroup', 'create',
671 '--rgw-zonegroup=default',
672 '--master', '--default']
673 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
674 self
.mgr
.log
.info('created zonegroup: default')
# Helper: ensure the 'default' zonegroup exists before creating a zone.
676 def create_zonegroup_if_required() -> None:
677 zonegroups
= get_zonegroups()
678 if 'default' not in zonegroups
:
# Helper: list existing zone names.
681 def get_zones() -> List
[str]:
682 cmd
= ['radosgw-admin',
683 '--key=%s' % keyring
,
684 '--user', 'rgw.%s' % rgw_id
,
687 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
693 return j
.get('zones', [])
694 except Exception as e
:
695 raise OrchestratorError('failed to parse zone info')
# Helper: create the spec's zone in the default zonegroup.
697 def create_zone() -> None:
698 cmd
= ['radosgw-admin',
699 '--key=%s' % keyring
,
700 '--user', 'rgw.%s' % rgw_id
,
702 '--rgw-zonegroup=default',
703 '--rgw-zone=%s' % spec
.rgw_zone
,
704 '--master', '--default']
705 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
706 self
.mgr
.log
.info('created zone: %s' % spec
.rgw_zone
)
# Main flow: create realm/zonegroup/zone only when absent, then update
# the period so the changes take effect.
709 realms
= get_realms()
710 if spec
.rgw_realm
not in realms
:
715 if spec
.rgw_zone
not in zones
:
716 create_zonegroup_if_required()
720 # update period if changes were made
722 cmd
= ['radosgw-admin',
723 '--key=%s' % keyring
,
724 '--user', 'rgw.%s' % rgw_id
,
726 '--rgw-realm=%s' % spec
.rgw_realm
,
728 result
= subprocess
.run(cmd
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
729 self
.mgr
.log
.info('updated period')
# Service handler for rbd-mirror daemons: creates the cephx key with
# rbd-mirror/rbd profiles and attaches it to the daemon spec.
# NOTE(review): fragmentary extraction — the TYPE attribute, the command
# dict close (orig 744-745), and the return (orig 747-748) are in
# missing lines.
732 class RbdMirrorService(CephService
):
735 def prepare_create(self
, daemon_spec
: CephadmDaemonSpec
) -> CephadmDaemonSpec
:
736 assert self
.TYPE
== daemon_spec
.daemon_type
737 daemon_id
, host
= daemon_spec
.daemon_id
, daemon_spec
.host
739 ret
, keyring
, err
= self
.mgr
.check_mon_command({
740 'prefix': 'auth get-or-create',
741 'entity': self
.get_auth_entity(daemon_id
),
742 'caps': ['mon', 'profile rbd-mirror',
743 'osd', 'profile rbd'],
746 daemon_spec
.keyring
= keyring
# Service handler for crash daemons: creates a per-host cephx key (note
# the host= argument — crash entities are keyed by host) and attaches it
# to the daemon spec.
# NOTE(review): fragmentary extraction — the TYPE attribute and the
# trailing return (beyond the last visible line) are in missing lines.
751 class CrashService(CephService
):
754 def prepare_create(self
, daemon_spec
: CephadmDaemonSpec
) -> CephadmDaemonSpec
:
755 assert self
.TYPE
== daemon_spec
.daemon_type
756 daemon_id
, host
= daemon_spec
.daemon_id
, daemon_spec
.host
758 ret
, keyring
, err
= self
.mgr
.check_mon_command({
759 'prefix': 'auth get-or-create',
760 'entity': self
.get_auth_entity(daemon_id
, host
=host
),
761 'caps': ['mon', 'profile crash',
762 'mgr', 'profile crash'],
765 daemon_spec
.keyring
= keyring