import json
import re
import logging
import subprocess
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, List, Callable, Any, TypeVar, Generic, \
    Optional, Dict, Tuple, NewType

from mgr_module import HandleCommandResult, MonCommandFailed

from ceph.deployment.service_spec import ServiceSpec, RGWSpec
from ceph.deployment.utils import is_ipv6, unwrap_ipv6
from orchestrator import OrchestratorError, DaemonDescription
from cephadm import utils

if TYPE_CHECKING:
    from cephadm.module import CephadmOrchestrator

logger = logging.getLogger(__name__)

ServiceSpecs = TypeVar('ServiceSpecs', bound=ServiceSpec)
AuthEntity = NewType('AuthEntity', str)


class CephadmDaemonSpec(Generic[ServiceSpecs]):
    # typing.NamedTuple + Generic is broken in py36
    def __init__(self, host: str, daemon_id: str,
                 spec: Optional[ServiceSpecs] = None,
                 network: Optional[str] = None,
                 keyring: Optional[str] = None,
                 extra_args: Optional[List[str]] = None,
                 ceph_conf: str = '',
                 extra_files: Optional[Dict[str, Any]] = None,
                 daemon_type: Optional[str] = None,
                 ports: Optional[List[int]] = None,):
36 """
37 Used for
38 * deploying new daemons. then everything is set
39 * redeploying existing daemons, then only the first three attrs are set.
40
41 Would be great to have a consistent usage where all properties are set.
42 """
        self.host: str = host
        self.daemon_id = daemon_id
        daemon_type = daemon_type or (spec.service_type if spec else None)
        assert daemon_type is not None
        self.daemon_type: str = daemon_type

        # would be great to have the spec always available:
        self.spec: Optional[ServiceSpecs] = spec

        # mons
        self.network = network

        # for run_cephadm.
        self.keyring: Optional[str] = keyring

        # For run_cephadm. Would be great to have more expressive names.
        self.extra_args: List[str] = extra_args or []

        self.ceph_conf = ceph_conf
        self.extra_files = extra_files or {}

        # TCP ports used by the daemon
        self.ports: List[int] = ports or []

    def name(self) -> str:
        return '%s.%s' % (self.daemon_type, self.daemon_id)

    def config_get_files(self) -> Dict[str, Any]:
        files = self.extra_files
        if self.ceph_conf:
            files['config'] = self.ceph_conf

        return files
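
    # Illustrative example (hypothetical values, not from the original file):
    # redeploying an existing daemon only needs the first three attributes, e.g.
    #
    #   CephadmDaemonSpec(host='host1', daemon_id='a', spec=service_spec)
    #
    # while a fresh deployment also fills network, keyring, ports, etc.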


class CephadmService(metaclass=ABCMeta):
    """
    Base class for service types. Often providing a create() and config() fn.
    """

    @property
    @abstractmethod
    def TYPE(self) -> str:
        pass

    def __init__(self, mgr: "CephadmOrchestrator"):
        self.mgr: "CephadmOrchestrator" = mgr

    def make_daemon_spec(self, host: str,
                         daemon_id: str,
                         network: str,
                         spec: ServiceSpecs) -> CephadmDaemonSpec:
        return CephadmDaemonSpec(
            host=host,
            daemon_id=daemon_id,
            spec=spec,
            network=network
        )

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        raise NotImplementedError()

    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        raise NotImplementedError()

    def daemon_check_post(self, daemon_descrs: List[DaemonDescription]) -> None:
        """The post actions needed to be done after daemons are checked"""
        if self.mgr.config_dashboard:
            if 'dashboard' in self.mgr.get('mgr_map')['modules']:
                self.config_dashboard(daemon_descrs)
            else:
                logger.debug('Dashboard is not enabled. Skip configuration.')

    def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None:
        """Config dashboard settings."""
        raise NotImplementedError()

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        # if this is called for a service type where it hasn't explicitly been
        # defined, return an empty DaemonDescription
        return DaemonDescription()

    def _inventory_get_addr(self, hostname: str) -> str:
        """Get a host's address with its hostname."""
        return self.mgr.inventory.get_addr(hostname)

    def _set_service_url_on_dashboard(self,
                                      service_name: str,
                                      get_mon_cmd: str,
                                      set_mon_cmd: str,
                                      service_url: str) -> None:
        """A helper to get and set service_url via Dashboard's MON command.

        If the result of get_mon_cmd differs from service_url, set_mon_cmd will
        be sent to set the service_url.
        """
        def get_set_cmd_dicts(out: str) -> List[dict]:
            cmd_dict = {
                'prefix': set_mon_cmd,
                'value': service_url
            }
            return [cmd_dict] if service_url != out else []

        self._check_and_set_dashboard(
            service_name=service_name,
            get_cmd=get_mon_cmd,
            get_set_cmd_dicts=get_set_cmd_dicts
        )

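    # Illustrative use of the helper above (hypothetical URL; the command names
    # mirror the dashboard get-/set- pairs mentioned in the docstring below):
    #
    #   self._set_service_url_on_dashboard(
    #       'Grafana',
    #       'dashboard get-grafana-api-url',
    #       'dashboard set-grafana-api-url',
    #       'https://mgr-host:3000',
    #   )
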
    def _check_and_set_dashboard(self,
                                 service_name: str,
                                 get_cmd: str,
                                 get_set_cmd_dicts: Callable[[str], List[dict]]) -> None:
156 """A helper to set configs in the Dashboard.
157
158 The method is useful for the pattern:
159 - Getting a config from Dashboard by using a Dashboard command. e.g. current iSCSI
160 gateways.
161 - Parse or deserialize previous output. e.g. Dashboard command returns a JSON string.
162 - Determine if the config need to be update. NOTE: This step is important because if a
163 Dashboard command modified Ceph config, cephadm's config_notify() is called. Which
164 kicks the serve() loop and the logic using this method is likely to be called again.
165 A config should be updated only when needed.
166 - Update a config in Dashboard by using a Dashboard command.
167
168 :param service_name: the service name to be used for logging
169 :type service_name: str
170 :param get_cmd: Dashboard command prefix to get config. e.g. dashboard get-grafana-api-url
171 :type get_cmd: str
172 :param get_set_cmd_dicts: function to create a list, and each item is a command dictionary.
173 e.g.
174 [
175 {
176 'prefix': 'dashboard iscsi-gateway-add',
177 'service_url': 'http://admin:admin@aaa:5000',
178 'name': 'aaa'
179 },
180 {
181 'prefix': 'dashboard iscsi-gateway-add',
182 'service_url': 'http://admin:admin@bbb:5000',
183 'name': 'bbb'
184 }
185 ]
186 The function should return empty list if no command need to be sent.
187 :type get_set_cmd_dicts: Callable[[str], List[dict]]
188 """

        try:
            _, out, _ = self.mgr.check_mon_command({
                'prefix': get_cmd
            })
        except MonCommandFailed as e:
            logger.warning('Failed to get Dashboard config for %s: %s', service_name, e)
            return
        cmd_dicts = get_set_cmd_dicts(out.strip())
        for cmd_dict in list(cmd_dicts):
            try:
                inbuf = cmd_dict.pop('inbuf', None)
                _, out, _ = self.mgr.check_mon_command(cmd_dict, inbuf)
            except MonCommandFailed as e:
                logger.warning('Failed to set Dashboard config for %s: %s', service_name, e)

    def ok_to_stop(self, daemon_ids: List[str]) -> HandleCommandResult:
        names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids]
        out = f'It is presumed safe to stop {names}'
        err = f'It is NOT safe to stop {names}'

        if self.TYPE not in ['mon', 'osd', 'mds']:
            logger.info(out)
            return HandleCommandResult(0, out, None)

        r = HandleCommandResult(*self.mgr.mon_command({
            'prefix': f'{self.TYPE} ok-to-stop',
            'ids': daemon_ids,
        }))

        if r.retval:
            err = f'{err}: {r.stderr}' if r.stderr else err
            logger.error(err)
            return HandleCommandResult(r.retval, r.stdout, err)

        out = f'{out}: {r.stdout}' if r.stdout else out
        logger.info(out)
        return HandleCommandResult(r.retval, out, r.stderr)

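    # For mon, osd and mds the decision above is delegated to the daemon's own
    # `ceph <type> ok-to-stop` command (e.g. `ceph osd ok-to-stop 0 1`); for
    # all other service types stopping is presumed safe.
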
    def pre_remove(self, daemon: DaemonDescription) -> None:
        """
        Called before the daemon is removed.
        """
        assert self.TYPE == daemon.daemon_type
        logger.debug(f'Pre remove daemon {self.TYPE}.{daemon.daemon_id}')

    def post_remove(self, daemon: DaemonDescription) -> None:
        """
        Called after the daemon is removed.
        """
        assert self.TYPE == daemon.daemon_type
        logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')


class CephService(CephadmService):
    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        # Ceph daemons (mon, mgr, mds, osd, etc.)
        cephadm_config = self.get_config_and_keyring(
            daemon_spec.daemon_type,
            daemon_spec.daemon_id,
            host=daemon_spec.host,
            keyring=daemon_spec.keyring,
            extra_ceph_config=daemon_spec.ceph_conf)

        if daemon_spec.config_get_files():
            cephadm_config.update({'files': daemon_spec.config_get_files()})

        return cephadm_config, []

    def post_remove(self, daemon: DaemonDescription) -> None:
        super().post_remove(daemon)
        self.remove_keyring(daemon)

    def get_auth_entity(self, daemon_id: str, host: str = "") -> AuthEntity:
        """
        Map the daemon id to a cephx keyring entity name
        """
        if self.TYPE in ['rgw', 'rbd-mirror', 'nfs', "iscsi"]:
            return AuthEntity(f'client.{self.TYPE}.{daemon_id}')
        elif self.TYPE == 'crash':
            if host == "":
                raise OrchestratorError("Host not provided to generate <crash> auth entity name")
            return AuthEntity(f'client.{self.TYPE}.{host}')
        elif self.TYPE == 'mon':
            return AuthEntity('mon.')
        elif self.TYPE in ['mgr', 'osd', 'mds']:
            return AuthEntity(f'{self.TYPE}.{daemon_id}')
        else:
            raise OrchestratorError("unknown daemon type")

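    # Resulting entity names, for example (derived from the branches above):
    #   rgw daemon 'foo.host1' -> 'client.rgw.foo.host1'
    #   crash on host 'host1'  -> 'client.crash.host1'
    #   osd daemon '3'         -> 'osd.3'
    #   any mon                -> 'mon.'
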
    def get_config_and_keyring(self,
                               daemon_type: str,
                               daemon_id: str,
                               host: str,
                               keyring: Optional[str] = None,
                               extra_ceph_config: Optional[str] = None
                               ) -> Dict[str, Any]:
        # keyring
        if not keyring:
            entity: AuthEntity = self.get_auth_entity(daemon_id, host=host)
            ret, keyring, err = self.mgr.check_mon_command({
                'prefix': 'auth get',
                'entity': entity,
            })

        config = self.mgr.get_minimal_ceph_conf()

        if extra_ceph_config:
            config += extra_ceph_config

        return {
            'config': config,
            'keyring': keyring,
        }

    def remove_keyring(self, daemon: DaemonDescription) -> None:
        daemon_id: str = daemon.daemon_id
        host: str = daemon.hostname

        if daemon_id == 'mon':
            # do not remove the mon keyring
            return

        entity = self.get_auth_entity(daemon_id, host=host)

        logger.info(f'Remove keyring: {entity}')
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': entity,
        })


class MonService(CephService):
    TYPE = 'mon'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """
        Create a new monitor on the given host.
        """
        assert self.TYPE == daemon_spec.daemon_type
        name, host, network = daemon_spec.daemon_id, daemon_spec.host, daemon_spec.network

        # get mon. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get',
            'entity': self.get_auth_entity(name),
        })

        extra_config = '[mon.%s]\n' % name
        if network:
            # infer whether this is a CIDR network, addrvec, or plain IP
            if '/' in network:
                extra_config += 'public network = %s\n' % network
            elif network.startswith('[v') and network.endswith(']'):
                extra_config += 'public addrv = %s\n' % network
            elif is_ipv6(network):
                extra_config += 'public addr = %s\n' % unwrap_ipv6(network)
            elif ':' not in network:
                extra_config += 'public addr = %s\n' % network
            else:
                raise OrchestratorError(
                    'Must specify a CIDR network, ceph addrvec, or plain IP: \'%s\'' % network)
        else:
            # try to get the public_network from the config
            ret, network, err = self.mgr.check_mon_command({
                'prefix': 'config get',
                'who': 'mon',
                'key': 'public_network',
            })
            network = network.strip() if network else network
            if not network:
                raise OrchestratorError(
                    'Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP')
            if '/' not in network:
                raise OrchestratorError(
                    'public_network is set but does not look like a CIDR network: \'%s\'' % network)
            extra_config += 'public network = %s\n' % network

        daemon_spec.ceph_conf = extra_config
        daemon_spec.keyring = keyring

        return daemon_spec

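    # The ceph_conf snippet assembled in prepare_create above looks like, e.g.
    # (illustrative values):
    #   [mon.a]
    #   public network = 10.1.2.0/24
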
    def _check_safe_to_destroy(self, mon_id: str) -> None:
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'quorum_status',
        })
        try:
            j = json.loads(out)
        except Exception as e:
            raise OrchestratorError('failed to parse quorum status') from e

        mons = [m['name'] for m in j['monmap']['mons']]
        if mon_id not in mons:
            logger.info('Safe to remove mon.%s: not in monmap (%s)' % (
                mon_id, mons))
            return
        new_mons = [m for m in mons if m != mon_id]
        new_quorum = [m for m in j['quorum_names'] if m != mon_id]
        if len(new_quorum) > len(new_mons) / 2:
            logger.info('Safe to remove mon.%s: new quorum should be %s (from %s)' %
                        (mon_id, new_quorum, new_mons))
            return
        raise OrchestratorError(
            'Removing %s would break mon quorum (new quorum %s, new mons %s)' % (mon_id, new_quorum, new_mons))

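    # Worked example for the quorum check above: with mons [a, b, c] all in
    # quorum, removing 'c' leaves new_quorum=[a, b] and new_mons=[a, b];
    # 2 > 2/2 holds, so the removal is considered safe.
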
    def pre_remove(self, daemon: DaemonDescription) -> None:
        super().pre_remove(daemon)

        daemon_id: str = daemon.daemon_id
        self._check_safe_to_destroy(daemon_id)

        # remove mon from quorum before we destroy the daemon
        logger.info('Removing monitor %s from monmap...' % daemon_id)
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'mon rm',
            'name': daemon_id,
        })


class MgrService(CephService):
    TYPE = 'mgr'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        """
        Create a new manager instance on a host.
        """
        assert self.TYPE == daemon_spec.daemon_type
        mgr_id, host = daemon_spec.daemon_id, daemon_spec.host

        # get mgr. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(mgr_id),
            'caps': ['mon', 'profile mgr',
                     'osd', 'allow *',
                     'mds', 'allow *'],
        })

        # Retrieve ports used by manager modules.
        # With several manager daemons running on different hosts, the user may
        # have chosen a different dashboard port on each server. If that is the
        # case, only the default dashboard port will be opened here.
        ports = []
        ret, mgr_services, err = self.mgr.check_mon_command({
            'prefix': 'mgr services',
        })
        if mgr_services:
            mgr_endpoints = json.loads(mgr_services)
            for end_point in mgr_endpoints.values():
                # e.g. 'https://host:8443/' -> 8443
                port = re.search(r'\:\d+\/', end_point)
                if port:
                    ports.append(int(port[0][1:-1]))

        if ports:
            daemon_spec.ports = ports

        daemon_spec.keyring = keyring

        return daemon_spec

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        for daemon in daemon_descrs:
            if self.mgr.daemon_is_self(daemon.daemon_type, daemon.daemon_id):
                return daemon
        # if no active mgr found, return empty Daemon Desc
        return DaemonDescription()

    def fail_over(self) -> None:
        if not self.mgr_map_has_standby():
            raise OrchestratorError('Need standby mgr daemon', event_kind_subject=(
                'daemon', 'mgr' + self.mgr.get_mgr_id()))

        self.mgr.events.for_daemon('mgr' + self.mgr.get_mgr_id(),
                                   'INFO', 'Failing over to other MGR')
        logger.info('Failing over to other MGR')

        # fail over
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'mgr fail',
            'who': self.mgr.get_mgr_id(),
        })

    def mgr_map_has_standby(self) -> bool:
        """
        This is a bit safer than asking our inventory. If the mgr joined the mgr map,
        we know it joined the cluster
        """
        mgr_map = self.mgr.get('mgr_map')
        num = len(mgr_map.get('standbys'))
        return bool(num)


class MdsService(CephService):
    TYPE = 'mds'

    def config(self, spec: ServiceSpec) -> None:
        assert self.TYPE == spec.service_type
        assert spec.service_id

        # ensure mds_join_fs is set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': 'mds.' + spec.service_id,
            'name': 'mds_join_fs',
            'value': spec.service_id,
        })

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        mds_id, host = daemon_spec.daemon_id, daemon_spec.host

        # get mds. key
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(mds_id),
            'caps': ['mon', 'profile mds',
                     'osd', 'allow rw tag cephfs *=*',
                     'mds', 'allow'],
        })
        daemon_spec.keyring = keyring

        return daemon_spec

    def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
        active_mds_strs = list()
        for fs in self.mgr.get('fs_map')['filesystems']:
            mds_map = fs['mdsmap']
            if mds_map is not None:
                for mds_id, mds_status in mds_map['info'].items():
                    if mds_status['state'] == 'up:active':
                        active_mds_strs.append(mds_status['name'])
        if len(active_mds_strs) != 0:
            for daemon in daemon_descrs:
                if daemon.daemon_id in active_mds_strs:
                    return daemon
        # if no mds found, return empty Daemon Desc
        return DaemonDescription()


class RgwService(CephService):
    TYPE = 'rgw'

    def config(self, spec: RGWSpec, rgw_id: str) -> None:
        assert self.TYPE == spec.service_type

        # create realm, zonegroup, and zone if needed
        self.create_realm_zonegroup_zone(spec, rgw_id)

        # ensure rgw_realm and rgw_zone are set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_zone',
            'value': spec.rgw_zone,
        })
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.rgw_realm}",
            'name': 'rgw_realm',
            'value': spec.rgw_realm,
        })
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_frontends',
            'value': spec.rgw_frontends_config_value(),
        })

        if spec.rgw_frontend_ssl_certificate:
            if isinstance(spec.rgw_frontend_ssl_certificate, list):
                cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
            elif isinstance(spec.rgw_frontend_ssl_certificate, str):
                cert_data = spec.rgw_frontend_ssl_certificate
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_certificate: %s'
                    % spec.rgw_frontend_ssl_certificate)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.crt',
                'val': cert_data,
            })

        if spec.rgw_frontend_ssl_key:
            if isinstance(spec.rgw_frontend_ssl_key, list):
                key_data = '\n'.join(spec.rgw_frontend_ssl_key)
            elif isinstance(spec.rgw_frontend_ssl_key, str):
                key_data = spec.rgw_frontend_ssl_key
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_key: %s'
                    % spec.rgw_frontend_ssl_key)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.key',
                'val': key_data,
            })

        logger.info('Saving service %s spec with placement %s' % (
            spec.service_name(), spec.placement.pretty_str()))
        self.mgr.spec_store.save(spec)

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        rgw_id, host = daemon_spec.daemon_id, daemon_spec.host

        keyring = self.get_keyring(rgw_id)

        daemon_spec.keyring = keyring

        return daemon_spec

    def get_keyring(self, rgw_id: str) -> str:
        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(rgw_id),
            'caps': ['mon', 'allow *',
                     'mgr', 'allow rw',
                     'osd', 'allow rwx tag rgw *=*'],
        })
        return keyring

    def create_realm_zonegroup_zone(self, spec: RGWSpec, rgw_id: str) -> None:
        if utils.get_cluster_health(self.mgr) != 'HEALTH_OK':
            raise OrchestratorError('Health not ok, will try again when health ok')

        # get keyring needed to run rados commands and strip out just the keyring
        keyring = self.get_keyring(rgw_id).split('key = ', 1)[1].rstrip()

        # We can call radosgw-admin within the container, because cephadm gives
        # the MGR the required keyring permissions

        def get_realms() -> List[str]:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'realm', 'list',
                   '--format=json']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out = result.stdout
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get('realms', [])
            except Exception as e:
                raise OrchestratorError('failed to parse realm info') from e

        def create_realm() -> None:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'realm', 'create',
                   '--rgw-realm=%s' % spec.rgw_realm,
                   '--default']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('created realm: %s' % spec.rgw_realm)

        def get_zonegroups() -> List[str]:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zonegroup', 'list',
                   '--format=json']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out = result.stdout
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get('zonegroups', [])
            except Exception as e:
                raise OrchestratorError('failed to parse zonegroup info') from e

        def create_zonegroup() -> None:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zonegroup', 'create',
                   '--rgw-zonegroup=default',
                   '--master', '--default']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('created zonegroup: default')

        def create_zonegroup_if_required() -> None:
            zonegroups = get_zonegroups()
            if 'default' not in zonegroups:
                create_zonegroup()

        def get_zones() -> List[str]:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zone', 'list',
                   '--format=json']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out = result.stdout
            if not out:
                return []
            try:
                j = json.loads(out)
                return j.get('zones', [])
            except Exception as e:
                raise OrchestratorError('failed to parse zone info') from e

        def create_zone() -> None:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'zone', 'create',
                   '--rgw-zonegroup=default',
                   '--rgw-zone=%s' % spec.rgw_zone,
                   '--master', '--default']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('created zone: %s' % spec.rgw_zone)

        changes = False
        realms = get_realms()
        if spec.rgw_realm not in realms:
            create_realm()
            changes = True

        zones = get_zones()
        if spec.rgw_zone not in zones:
            create_zonegroup_if_required()
            create_zone()
            changes = True

        # update period if changes were made
        if changes:
            cmd = ['radosgw-admin',
                   '--key=%s' % keyring,
                   '--user', 'rgw.%s' % rgw_id,
                   'period', 'update',
                   '--rgw-realm=%s' % spec.rgw_realm,
                   '--commit']
            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.mgr.log.info('updated period')
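
        # Net effect on a fresh cluster (illustrative realm/zone names), roughly:
        #   radosgw-admin realm create --rgw-realm=myrealm --default
        #   radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
        #   radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=myzone --master --default
        #   radosgw-admin period update --rgw-realm=myrealm --commit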


class RbdMirrorService(CephService):
    TYPE = 'rbd-mirror'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        daemon_id, host = daemon_spec.daemon_id, daemon_spec.host

        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(daemon_id),
            'caps': ['mon', 'profile rbd-mirror',
                     'osd', 'profile rbd'],
        })

        daemon_spec.keyring = keyring

        return daemon_spec


class CrashService(CephService):
    TYPE = 'crash'

    def prepare_create(self, daemon_spec: CephadmDaemonSpec) -> CephadmDaemonSpec:
        assert self.TYPE == daemon_spec.daemon_type
        daemon_id, host = daemon_spec.daemon_id, daemon_spec.host

        ret, keyring, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.get_auth_entity(daemon_id, host=host),
            'caps': ['mon', 'profile crash',
                     'mgr', 'profile crash'],
        })

        daemon_spec.keyring = keyring

        return daemon_spec