import json
import logging
import re
import subprocess

from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, List, \
    NewType, Optional, Tuple, TypeVar

from mgr_module import HandleCommandResult, MonCommandFailed

from ceph.deployment.service_spec import ServiceSpec, RGWSpec
from ceph.deployment.utils import is_ipv6, unwrap_ipv6
from orchestrator import OrchestratorError, DaemonDescription
from cephadm import utils

if TYPE_CHECKING:
    # Imported only for type annotations to avoid a circular import at runtime.
    from cephadm.module import CephadmOrchestrator
# Module-level logger (lazy %-style args are used throughout this file).
logger = logging.getLogger(__name__)

# Type variable that makes CephadmDaemonSpec generic over the concrete
# ServiceSpec subclass it carries (e.g. RGWSpec for RgwService).
ServiceSpecs = TypeVar('ServiceSpecs', bound=ServiceSpec)

# Distinct string alias for cephx entity names such as 'client.rgw.<id>'
# or 'mon.' — see CephService.get_auth_entity().
AuthEntity = NewType('AuthEntity', str)
class CephadmDaemonSpec(Generic[ServiceSpecs]):
    """Everything cephadm needs to (re)deploy one daemon on one host.

    Used in two modes:

    * deploying new daemons: then everything is set
    * redeploying existing daemons: then only the first three attrs are set.

    Would be great to have a consistent usage where all properties are set.
    """
    # typing.NamedTuple + Generic is broken in py36, hence a plain class.

    def __init__(self, host: str, daemon_id: str,
                 spec: Optional[ServiceSpecs] = None,
                 network: Optional[str] = None,
                 keyring: Optional[str] = None,
                 extra_args: Optional[List[str]] = None,
                 ceph_conf: str = '',
                 extra_files: Optional[Dict[str, Any]] = None,
                 daemon_type: Optional[str] = None,
                 ports: Optional[List[int]] = None,):
        # BUGFIX: the body assigns self.ceph_conf from `ceph_conf` and callers
        # (e.g. MonService.prepare_create) set daemon_spec.ceph_conf, but the
        # parameter was missing from the signature — restored here.
        self.host: str = host
        self.daemon_id = daemon_id
        # Fall back to the spec's service type when no explicit daemon_type given.
        daemon_type = daemon_type or (spec.service_type if spec else None)
        assert daemon_type is not None
        self.daemon_type: str = daemon_type

        # would be great to have the spec always available:
        self.spec: Optional[ServiceSpecs] = spec

        # network (CIDR / addrvec / IP) — used when deploying mons.
        self.network = network

        # for run_cephadm.
        self.keyring: Optional[str] = keyring

        # For run_cephadm. Would be great to have more expressive names.
        self.extra_args: List[str] = extra_args or []

        # Extra ceph.conf snippet appended to the minimal config.
        self.ceph_conf = ceph_conf
        self.extra_files = extra_files or {}

        # TCP ports used by the daemon
        self.ports: List[int] = ports or []

    def name(self) -> str:
        """Return the daemon's full name, e.g. 'mon.a'."""
        return '%s.%s' % (self.daemon_type, self.daemon_id)

    def config_get_files(self) -> Dict[str, Any]:
        """Return the files to be shipped to the daemon's config directory.

        BUGFIX: the visible body never returned `files` and unconditionally
        wrote the 'config' key — guard and return restored.
        """
        files = self.extra_files
        if self.ceph_conf:
            files['config'] = self.ceph_conf
        return files
class CephadmService(metaclass=ABCMeta):
    """
    Base class for service types. Often providing a create() and config() fn.
    """

    @property
    @abstractmethod
    def TYPE(self) -> str:
        """Service type string, e.g. 'mon', 'mgr', 'rgw' (set by subclasses)."""
        pass

    def __init__(self, mgr: "CephadmOrchestrator"):
        self.mgr: "CephadmOrchestrator" = mgr

    def make_daemon_spec(self, host: str,
                         daemon_id: str,
                         network: str,
                         spec: ServiceSpecs) -> CephadmDaemonSpec:
        """Build a CephadmDaemonSpec for one daemon of this service."""
        return CephadmDaemonSpec(
            host=host,
            daemon_id=daemon_id,
            spec=spec,
            network=network,
        )

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        raise NotImplementedError()

    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        raise NotImplementedError()

    def daemon_check_post(self, daemon_descrs: List[DaemonDescription]) -> None:
        """The post actions needed to be done after daemons are checked"""
        if self.mgr.config_dashboard:
            if 'dashboard' in self.mgr.get('mgr_map')['modules']:
                self.config_dashboard(daemon_descrs)
            else:
                logger.debug('Dashboard is not enabled. Skip configuration.')

    def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None:
        """Config dashboard settings."""
        raise NotImplementedError()

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        # if this is called for a service type where it hasn't explcitly been
        # defined, return empty Daemon Desc
        return DaemonDescription()

    def _inventory_get_addr(self, hostname: str) -> str:
        """Get a host's address with its hostname."""
        return self.mgr.inventory.get_addr(hostname)

    def _set_service_url_on_dashboard(self,
                                      service_name: str,
                                      get_mon_cmd: str,
                                      set_mon_cmd: str,
                                      service_url: str) -> None:
        """A helper to get and set service_url via Dashboard's MON command.

        If result of get_mon_cmd differs from service_url, set_mon_cmd will
        be sent to set the service_url.
        """
        def get_set_cmd_dicts(out: str) -> List[dict]:
            cmd_dict = {
                'prefix': set_mon_cmd,
                'value': service_url,
            }
            return [cmd_dict] if service_url != out else []

        self._check_and_set_dashboard(
            service_name=service_name,
            get_cmd=get_mon_cmd,
            get_set_cmd_dicts=get_set_cmd_dicts,
        )

    def _check_and_set_dashboard(self,
                                 service_name: str,
                                 get_cmd: str,
                                 get_set_cmd_dicts: Callable[[str], List[dict]]) -> None:
        """A helper to set configs in the Dashboard.

        The method is useful for the pattern:
        - Getting a config from Dashboard by using a Dashboard command. e.g. current iSCSI
          gateways.
        - Parse or deserialize previous output. e.g. Dashboard command returns a JSON string.
        - Determine if the config need to be update. NOTE: This step is important because if a
          Dashboard command modified Ceph config, cephadm's config_notify() is called. Which
          kicks the serve() loop and the logic using this method is likely to be called again.
          A config should be updated only when needed.
        - Update a config in Dashboard by using a Dashboard command.

        :param service_name: the service name to be used for logging
        :type service_name: str
        :param get_cmd: Dashboard command prefix to get config. e.g. dashboard get-grafana-api-url
        :type get_cmd: str
        :param get_set_cmd_dicts: function to create a list, and each item is a command dictionary.
            The function should return empty list if no command need to be sent.
        :type get_set_cmd_dicts: Callable[[str], List[dict]]
        """
        try:
            _, out, _ = self.mgr.check_mon_command({
                'prefix': get_cmd,
            })
        except MonCommandFailed as e:
            logger.warning('Failed to get Dashboard config for %s: %s', service_name, e)
            return
        cmd_dicts = get_set_cmd_dicts(out.strip())
        for cmd_dict in list(cmd_dicts):
            try:
                _, out, _ = self.mgr.check_mon_command(cmd_dict)
            except MonCommandFailed as e:
                logger.warning('Failed to set Dashboard config for %s: %s', service_name, e)

    def ok_to_stop(self, daemon_ids: List[str]) -> HandleCommandResult:
        """Ask the cluster whether the given daemons can be stopped safely.

        Only mon/osd/mds have a real `<type> ok-to-stop` mon command; every
        other service type is presumed safe.
        """
        names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids]
        out = f'It is presumed safe to stop {names}'
        err = f'It is NOT safe to stop {names}'

        if self.TYPE not in ['mon', 'osd', 'mds']:
            logger.info(out)
            return HandleCommandResult(0, out, None)

        r = HandleCommandResult(*self.mgr.mon_command({
            'prefix': f'{self.TYPE} ok-to-stop',
            'ids': daemon_ids,
        }))

        if r.retval:
            err = f'{err}: {r.stderr}' if r.stderr else err
            logger.error(err)
            return HandleCommandResult(r.retval, r.stdout, err)

        out = f'{out}: {r.stdout}' if r.stdout else out
        logger.info(out)
        return HandleCommandResult(r.retval, out, r.stderr)

    def pre_remove(self, daemon: DaemonDescription) -> None:
        """
        Called before the daemon is removed.
        """
        assert self.TYPE == daemon.daemon_type
        logger.debug(f'Pre remove daemon {self.TYPE}.{daemon.daemon_id}')

    def post_remove(self, daemon: DaemonDescription) -> None:
        """
        Called after the daemon is removed.
        """
        assert self.TYPE == daemon.daemon_type
        logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')
class CephService(CephadmService):
    """Base for Ceph daemons proper (mon, mgr, mds, rgw, …) that need a
    minimal ceph.conf plus a cephx keyring shipped to the host."""

    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        # Ceph.daemons (mon, mgr, mds, osd, etc)
        cephadm_config = self.get_config_and_keyring(
            daemon_spec.daemon_type,
            daemon_spec.daemon_id,
            host=daemon_spec.host,
            keyring=daemon_spec.keyring,
            extra_ceph_config=daemon_spec.ceph_conf)

        if daemon_spec.config_get_files():
            cephadm_config.update({'files': daemon_spec.config_get_files()})

        return cephadm_config, []

    def post_remove(self, daemon: DaemonDescription) -> None:
        super().post_remove(daemon)
        self.remove_keyring(daemon)

    def get_auth_entity(self, daemon_id: str, host: str = "") -> AuthEntity:
        """
        Map the daemon id to a cephx keyring entity name
        """
        if self.TYPE in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
            return AuthEntity(f'client.{self.TYPE}.{daemon_id}')
        elif self.TYPE == 'crash':
            # crash keyrings are per-host, so the host name is mandatory here
            if host == "":
                raise OrchestratorError("Host not provided to generate <crash> auth entity name")
            return AuthEntity(f'client.{self.TYPE}.{host}')
        elif self.TYPE == 'mon':
            return AuthEntity('mon.')
        elif self.TYPE in ['mgr', 'osd', 'mds']:
            return AuthEntity(f'{self.TYPE}.{daemon_id}')
        else:
            raise OrchestratorError("unknown daemon type")

    def get_config_and_keyring(self,
                               daemon_type: str,
                               daemon_id: str,
                               host: str,
                               keyring: Optional[str] = None,
                               extra_ceph_config: Optional[str] = None
                               ) -> Dict[str, Any]:
        """Return the {'config': ..., 'keyring': ...} payload for run_cephadm.

        Fetches the keyring via `auth get` when none was supplied.
        """
        if not keyring:
            entity: AuthEntity = self.get_auth_entity(daemon_id, host=host)
            ret, keyring, err = self.mgr.check_mon_command({
                'prefix': 'auth get',
                'entity': entity,
            })

        config = self.mgr.get_minimal_ceph_conf()

        if extra_ceph_config:
            config += extra_ceph_config

        return {
            'config': config,
            'keyring': keyring,
        }

    def remove_keyring(self, daemon: DaemonDescription) -> None:
        daemon_id: str = daemon.daemon_id
        host: str = daemon.hostname

        if daemon_id == 'mon':
            # do not remove the mon keyring
            return

        entity = self.get_auth_entity(daemon_id, host=host)

        logger.info(f'Remove keyring: {entity}')
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': entity,
        })
class MonService(CephService):
    TYPE = 'mon'  # NOTE(review): class attr not visible in this view — confirm

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """
        Create a new monitor on the given host.
        """
        assert self.TYPE == daemon_spec.daemon_type
        name, host, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network

        # get mon. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get',
            'entity': self.get_auth_entity(name),
        })

        extra_config = '[mon.%s]\n' % name
        if network:
            # infer whether this is a CIDR network, addrvec, or plain IP
            if '/' in network:
                extra_config += 'public network = %s\n' % network
            elif network.startswith('[v') and network.endswith(']'):
                extra_config += 'public addrv = %s\n' % network
            elif is_ipv6(network):
                extra_config += 'public addr = %s\n' % unwrap_ipv6(network)
            elif ':' not in network:
                extra_config += 'public addr = %s\n' % network
            else:
                raise OrchestratorError(
                    'Must specify a CIDR network, ceph addrvec, or plain IP: \'%s\'' % network)
        else:
            # try to get the public_network from the config
            ret, network, err = self.mgr.check_mon_command({
                'prefix': 'config get',
                'who': 'mon',
                'key': 'public_network',
            })
            network = network.strip() if network else network
            if not network:
                raise OrchestratorError(
                    'Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP')
            if '/' not in network:
                raise OrchestratorError(
                    'public_network is set but does not look like a CIDR network: \'%s\'' % network)
            extra_config += 'public network = %s\n' % network

        daemon_spec.ceph_conf = extra_config
        daemon_spec.keyring = keyring

        return daemon_spec

    def _check_safe_to_destroy(self, mon_id: str) -> None:
        """Raise OrchestratorError if removing mon_id would break quorum."""
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'quorum_status',
        })
        try:
            j = json.loads(out)
        except Exception as e:
            raise OrchestratorError('failed to parse quorum status')

        mons = [m['name'] for m in j['monmap']['mons']]
        if mon_id not in mons:
            logger.info('Safe to remove mon.%s: not in monmap (%s)' % (
                mon_id, mons))
            return
        new_mons = [m for m in mons if m != mon_id]
        new_quorum = [m for m in j['quorum_names'] if m != mon_id]
        # a strict majority of the remaining mons must still be in quorum
        if len(new_quorum) > len(new_mons) / 2:
            logger.info('Safe to remove mon.%s: new quorum should be %s (from %s)' %
                        (mon_id, new_quorum, new_mons))
            return
        raise OrchestratorError(
            'Removing %s would break mon quorum (new quorum %s, new mons %s)' % (mon_id, new_quorum, new_mons))

    def pre_remove(self, daemon: DaemonDescription) -> None:
        super().pre_remove(daemon)

        daemon_id: str = daemon.daemon_id
        self._check_safe_to_destroy(daemon_id)

        # remove mon from quorum before we destroy the daemon
        logger.info('Removing monitor %s from monmap...' % daemon_id)
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'mon rm',
            'name': daemon_id,
        })
class MgrService(CephService):
    TYPE = 'mgr'  # NOTE(review): class attr not visible in this view — confirm

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """
        Create a new manager instance on a host.
        """
        assert self.TYPE == daemon_spec.daemon_type
        mgr_id, host = daemon_spec.daemon_id, daemon_spec.host

        # get mgr. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(mgr_id),
            'caps': ['mon', 'profile mgr',
                     'osd', 'allow *',
                     'mds', 'allow *'],
        })

        # Retrieve ports used by manager modules
        # In the case of the dashboard port and with several manager daemons
        # running in different hosts, it exists the possibility that the
        # user has decided to use different dashboard ports in each server
        # If this is the case then the dashboard port opened will be only the used
        # as default.
        ports = []
        ret, mgr_services, err = self.mgr.check_mon_command({
            'prefix': 'mgr services',
        })
        if mgr_services:
            mgr_endpoints = json.loads(mgr_services)
            for end_point in mgr_endpoints.values():
                # pull the port out of URLs like 'https://host:8443/'
                port = re.search(r'\:\d+\/', end_point)
                if port:
                    ports.append(int(port[0][1:-1]))

        if ports:
            daemon_spec.ports = ports

        daemon_spec.keyring = keyring

        return daemon_spec

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        for daemon in daemon_descrs:
            if self.mgr.daemon_is_self(daemon.daemon_type, daemon.daemon_id):
                return daemon
        # if no active mgr found, return empty Daemon Desc
        return DaemonDescription()

    def fail_over(self) -> None:
        """Fail the active mgr over to a standby; raises if none exists."""
        if not self.mgr_map_has_standby():
            raise OrchestratorError('Need standby mgr daemon', event_kind_subject=(
                'daemon', 'mgr' + self.mgr.get_mgr_id()))

        self.mgr.events.for_daemon('mgr' + self.mgr.get_mgr_id(),
                                   'INFO', 'Failing over to other MGR')
        logger.info('Failing over to other MGR')

        # fail over
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'mgr fail',
            'who': self.mgr.get_mgr_id(),
        })

    def mgr_map_has_standby(self) -> bool:
        """
        This is a bit safer than asking our inventory. If the mgr joined the mgr map,
        we know it joined the cluster
        """
        mgr_map = self.mgr.get('mgr_map')
        num = len(mgr_map.get('standbys'))
        return bool(num)
class MdsService(CephService):
    TYPE = 'mds'  # NOTE(review): class attr not visible in this view — confirm

    def config(self, spec: ServiceSpec) -> None:
        assert self.TYPE == spec.service_type
        assert spec.service_id

        # ensure mds_join_fs is set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': 'mds.' + spec.service_id,
            'name': 'mds_join_fs',
            'value': spec.service_id,
        })

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        mds_id, host = daemon_spec.daemon_id, daemon_spec.host

        # get mds. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(mds_id),
            'caps': ['mon', 'profile mds',
                     'osd', 'allow rw tag cephfs *=*',
                     'mds', 'allow'],
        })
        daemon_spec.keyring = keyring

        return daemon_spec

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        # collect the names of all MDS daemons that are up:active in any fs
        active_mds_strs = list()
        for fs in self.mgr.get('fs_map')['filesystems']:
            mds_map = fs['mdsmap']
            if mds_map is not None:
                for mds_id, mds_status in mds_map['info'].items():
                    if mds_status['state'] == 'up:active':
                        active_mds_strs.append(mds_status['name'])
        if len(active_mds_strs) != 0:
            for daemon in daemon_descrs:
                if daemon.daemon_id in active_mds_strs:
                    return daemon
        # if no mds found, return empty Daemon Desc
        return DaemonDescription()
class RgwService(CephService):
    TYPE = 'rgw'  # NOTE(review): class attr not visible in this view — confirm

    def config(self, spec: RGWSpec, rgw_id: str) -> None:
        assert self.TYPE == spec.service_type

        # create realm, zonegroup, and zone if needed
        self.create_realm_zonegroup_zone(spec, rgw_id)

        # ensure rgw_realm and rgw_zone is set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_zone',
            'value': spec.rgw_zone,
        })
        # NOTE(review): this 'who' targets ...{spec.rgw_realm} while the other
        # two target {spec.service_id} — looks like a copy-paste slip; confirm.
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.rgw_realm}",
            'name': 'rgw_realm',
            'value': spec.rgw_realm,
        })
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_frontends',
            'value': spec.rgw_frontends_config_value(),
        })

        if spec.rgw_frontend_ssl_certificate:
            if isinstance(spec.rgw_frontend_ssl_certificate, list):
                cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
            elif isinstance(spec.rgw_frontend_ssl_certificate, str):
                cert_data = spec.rgw_frontend_ssl_certificate
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_certificate: %s'
                    % spec.rgw_frontend_ssl_certificate)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.crt',
                'val': cert_data,
            })

        if spec.rgw_frontend_ssl_key:
            if isinstance(spec.rgw_frontend_ssl_key, list):
                key_data = '\n'.join(spec.rgw_frontend_ssl_key)
            # BUGFIX: this branch tested rgw_frontend_ssl_certificate, so a
            # str-valued ssl_key fell through to the error branch.
            elif isinstance(spec.rgw_frontend_ssl_key, str):
                key_data = spec.rgw_frontend_ssl_key
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_key: %s'
                    % spec.rgw_frontend_ssl_key)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.key',
                'val': key_data,
            })

        logger.info('Saving service %s spec with placement %s' % (
            spec.service_name(), spec.placement.pretty_str()))
        self.mgr.spec_store.save(spec)

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        rgw_id, host = daemon_spec.daemon_id, daemon_spec.host

        keyring = self.get_keyring(rgw_id)

        daemon_spec.keyring = keyring

        return daemon_spec

    def get_keyring(self, rgw_id: str) -> str:
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(rgw_id),
            'caps': ['mon', 'allow *',
                     'mgr', 'allow rw',
                     'osd', 'allow rwx tag rgw *=*'],
        })
        return keyring

    def create_realm_zonegroup_zone(self, spec: RGWSpec, rgw_id: str) -> None:
        """Idempotently create the realm/zonegroup/zone for this RGW spec,
        committing a period update only when something actually changed."""
        if utils.get_cluster_health(self.mgr) != 'HEALTH_OK':
            raise OrchestratorError('Health not ok, will try again when health ok')

        # get keyring needed to run rados commands and strip out just the keyring
        keyring = self.get_keyring(rgw_id).split('key = ', 1)[1].rstrip()

        # We can call radosgw-admin within the container, cause cephadm gives the MGR the required keyring permissions

        def get_realms() -> List[str]:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'realm', 'list',
                   '--format=json']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out = result.stdout
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get('realms', [])
            except Exception as e:
                raise OrchestratorError('failed to parse realm info')

        def create_realm() -> None:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'realm', 'create',
                   '--rgw-realm=%s' % spec.rgw_realm,
                   '--default']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('created realm: %s' % spec.rgw_realm)

        def get_zonegroups() -> List[str]:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zonegroup', 'list',
                   '--format=json']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out = result.stdout
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get('zonegroups', [])
            except Exception as e:
                raise OrchestratorError('failed to parse zonegroup info')

        def create_zonegroup() -> None:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zonegroup', 'create',
                   '--rgw-zonegroup=default',
                   '--master', '--default']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('created zonegroup: default')

        def create_zonegroup_if_required() -> None:
            zonegroups = get_zonegroups()
            if 'default' not in zonegroups:
                create_zonegroup()

        def get_zones() -> List[str]:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zone', 'list',
                   '--format=json']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out = result.stdout
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get('zones', [])
            except Exception as e:
                raise OrchestratorError('failed to parse zone info')

        def create_zone() -> None:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zone', 'create',
                   '--rgw-zonegroup=default',
                   '--rgw-zone=%s' % spec.rgw_zone,
                   '--master', '--default']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('created zone: %s' % spec.rgw_zone)

        changes = False
        realms = get_realms()
        if spec.rgw_realm not in realms:
            create_realm()
            changes = True

        zones = get_zones()
        if spec.rgw_zone not in zones:
            create_zonegroup_if_required()
            create_zone()
            changes = True

        # update period if changes were made
        if changes:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'period', 'update',
                   '--rgw-realm=%s' % spec.rgw_realm,
                   '--commit']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('updated period')
class RbdMirrorService(CephService):
    TYPE = 'rbd-mirror'  # NOTE(review): class attr not visible in this view — confirm

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        daemon_id, host = daemon_spec.daemon_id, daemon_spec.host

        # create the keyring for this rbd-mirror daemon
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(daemon_id),
            'caps': ['mon', 'profile rbd-mirror',
                     'osd', 'profile rbd'],
        })

        daemon_spec.keyring = keyring

        return daemon_spec
class CrashService(CephService):
    TYPE = 'crash'  # NOTE(review): class attr not visible in this view — confirm

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        daemon_id, host = daemon_spec.daemon_id, daemon_spec.host

        # crash keyrings are per-host, so the entity is derived from the host
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(daemon_id, host=host),
            'caps': ['mon', 'profile crash',
                     'mgr', 'profile crash'],
        })

        daemon_spec.keyring = keyring

        return daemon_spec