import 15.2.2 octopus source

diff --git a/ceph/src/pybind/mgr/orchestrator/module.py b/ceph/src/pybind/mgr/orchestrator/module.py
index c8c3c8369423e0bfa3f3b5a72fca1b60f22f24ac..7df8c847cecc7c792de81cbb71e59ae501da2775 100644
--- a/ceph/src/pybind/mgr/orchestrator/module.py
+++ b/ceph/src/pybind/mgr/orchestrator/module.py
@@ -1,30 +1,26 @@
 import datetime
 import errno
 import json
-import yaml
+from typing import List, Set, Optional, Iterator
+import re
 
+import yaml
 import six
-
-from ceph.deployment.inventory import Device
 from prettytable import PrettyTable
 
-from mgr_util import format_bytes, to_pretty_timedelta
-
-try:
-    from typing import List, Set, Optional, Dict, Iterator
-except ImportError:
-    pass  # just for type checking.
-
-
+from ceph.deployment.inventory import Device
 from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
-
 from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
+
+from mgr_util import format_bytes, to_pretty_timedelta
 from mgr_module import MgrModule, HandleCommandResult
 
 from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
     raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
     NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
-    RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta
+    RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
+    ServiceDescription, DaemonDescription, IscsiServiceSpec
+
 
 def nice_delta(now, t, suffix=''):
     if t:
@@ -32,6 +28,16 @@ def nice_delta(now, t, suffix=''):
     else:
         return '-'
 
+
+def to_format(what, format):
+    if format == 'json':
+        return json.dumps(what, sort_keys=True)
+    elif format == 'json-pretty':
+        return json.dumps(what, indent=2, sort_keys=True)
+    elif format == 'yaml':
+        return yaml.safe_dump_all(what, default_flow_style=False)
+
+
 @six.add_metaclass(CLICommandMeta)
 class OrchestratorCli(OrchestratorClientMixin, MgrModule):
     MODULE_OPTIONS = [
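
Note: the new to_format() helper centralizes output rendering for the commands
below. A minimal sketch of its behavior, using hypothetical data
(yaml.safe_dump_all() treats its argument as an iterable of documents, so a
list renders one YAML document per element; an unrecognized format falls
through and returns None, which the callers avoid by only calling it when
format != 'plain'):

  hosts = [{'hostname': 'node1', 'addr': '10.0.0.1'}]
  to_format(hosts, 'json')         # '[{"addr": "10.0.0.1", "hostname": "node1"}]'
  to_format(hosts, 'json-pretty')  # same content, indented by two spaces
  to_format(hosts, 'yaml')         # 'addr: 10.0.0.1\nhostname: node1\n'
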
@@ -202,16 +208,16 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
 
     @_cli_read_command(
         'orch host ls',
-        'name=format,type=CephChoices,strings=json|plain,req=false',
+        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false',
         'List hosts')
     def _get_hosts(self, format='plain'):
         completion = self.get_hosts()
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        if format == 'json':
+        if format != 'plain':
             hosts = [host.to_json()
                      for host in completion.result]
-            output = json.dumps(hosts, sort_keys=True)
+            output = to_format(hosts, format)
         else:
             table = PrettyTable(
                 ['HOST', 'ADDR', 'LABELS', 'STATUS'],
@@ -249,7 +255,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
     @_cli_read_command(
         'orch device ls',
         "name=hostname,type=CephString,n=N,req=false "
-        "name=format,type=CephChoices,strings=json|plain,req=false "
+        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
         "name=refresh,type=CephBool,req=false",
         'List devices on a host')
     def _list_devices(self, hostname=None, format='plain', refresh=False):
@@ -268,9 +274,9 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
 
-        if format == 'json':
+        if format != 'plain':
             data = [n.to_json() for n in completion.result]
-            return HandleCommandResult(stdout=json.dumps(data))
+            return HandleCommandResult(stdout=to_format(data, format))
         else:
             out = []
 
@@ -316,28 +322,36 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         'orch ls',
         "name=service_type,type=CephString,req=false "
         "name=service_name,type=CephString,req=false "
-        "name=format,type=CephChoices,strings=json|plain,req=false "
+        "name=export,type=CephBool,req=false "
+        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
         "name=refresh,type=CephBool,req=false",
         'List services known to orchestrator')
-    def _list_services(self, host=None, service_type=None, service_name=None, format='plain', refresh=False):
+    def _list_services(self, host=None, service_type=None, service_name=None, export=False, format='plain', refresh=False):
+
+        if export and format == 'plain':
+            format = 'yaml'
+
         completion = self.describe_service(service_type,
                                            service_name,
                                            refresh=refresh)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        services = completion.result
+        services: List[ServiceDescription] = completion.result
 
         def ukn(s):
             return '<unknown>' if s is None else s
 
         # Sort the list for display
-        services.sort(key=lambda s: (ukn(s.service_name)))
+        services.sort(key=lambda s: (ukn(s.spec.service_name())))
 
         if len(services) == 0:
             return HandleCommandResult(stdout="No services reported")
-        elif format == 'json':
-            data = [s.to_json() for s in services]
-            return HandleCommandResult(stdout=json.dumps(data))
+        elif format != 'plain':
+            if export:
+                data = [s.spec.to_json() for s in services]
+            else:
+                data = [s.to_json() for s in services]
+            return HandleCommandResult(stdout=to_format(data, format))
         else:
             now = datetime.datetime.utcnow()
             table = PrettyTable(
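
Note: with --export, _list_services dumps the declarative ServiceSpec
(s.spec.to_json()) instead of the runtime ServiceDescription, and upgrades a
plain format to YAML, apparently superseding the `orch spec dump` command
removed further down. A hypothetical round trip:

  ceph orch ls --service-type mgr --export > mgr.yaml
  ceph orch apply -i mgr.yaml
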
@@ -355,7 +369,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
             table.align['PLACEMENT'] = 'l'
             table.left_padding_width = 0
             table.right_padding_width = 2
-            for s in sorted(services, key=lambda s: s.service_name):
+            for s in services:
                 if not s.spec:
                     pl = '<no spec>'
                 elif s.spec.unmanaged:
@@ -363,7 +377,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
                 else:
                     pl = s.spec.placement.pretty_str()
                 table.add_row((
-                    s.service_name,
+                    s.spec.service_name(),
                     '%d/%d' % (s.running, s.size),
                     nice_delta(now, s.last_refresh, ' ago'),
                     nice_delta(now, s.created),
@@ -380,7 +394,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         "name=service_name,type=CephString,req=false "
         "name=daemon_type,type=CephString,req=false "
         "name=daemon_id,type=CephString,req=false "
-        "name=format,type=CephChoices,strings=json|plain,req=false "
+        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
         "name=refresh,type=CephBool,req=false",
         'List daemons known to orchestrator')
     def _list_daemons(self, hostname=None, service_name=None, daemon_type=None, daemon_id=None, format='plain', refresh=False):
@@ -391,7 +405,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
                                        refresh=refresh)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        daemons = completion.result
+        daemons: List[DaemonDescription] = completion.result
 
         def ukn(s):
             return '<unknown>' if s is None else s
@@ -400,9 +414,9 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
 
         if len(daemons) == 0:
             return HandleCommandResult(stdout="No daemons reported")
-        elif format == 'json':
+        elif format != 'plain':
             data = [s.to_json() for s in daemons]
-            return HandleCommandResult(stdout=json.dumps(data))
+            return HandleCommandResult(stdout=to_format(data, format))
         else:
             now = datetime.datetime.utcnow()
             table = PrettyTable(
@@ -413,12 +427,15 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
             table.left_padding_width = 0
             table.right_padding_width = 2
             for s in sorted(daemons, key=lambda s: s.name()):
-                status = {
-                    -1: 'error',
-                    0: 'stopped',
-                    1: 'running',
-                    None: '<unknown>'
-                }[s.status]
+                if s.status_desc:
+                    status = s.status_desc
+                else:
+                    status = {
+                        -1: 'error',
+                        0: 'stopped',
+                        1: 'running',
+                        None: '<unknown>'
+                    }[s.status]
                 if s.status == 1 and s.started:
                     status += ' (%s)' % to_pretty_timedelta(now - s.started)
 
@@ -435,18 +452,98 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
 
             return HandleCommandResult(stdout=table.get_string())
 
+    def set_unmanaged_flag(self, service_name: str, unmanaged_flag: bool) -> HandleCommandResult:
+        # set the unmanaged flag on all specs matching service_name
+        completion = self.describe_service(service_name=service_name)
+        self._orchestrator_wait([completion])
+        raise_if_exception(completion)
+        services: List[ServiceDescription] = completion.result
+        specs = list()
+        for service in services:
+            spec = service.spec
+            spec.unmanaged = unmanaged_flag
+            specs.append(spec)
+        completion = self.apply(specs)
+        self._orchestrator_wait([completion])
+        raise_if_exception(completion)
+        if specs:
+            return HandleCommandResult(stdout=f"Changed <unmanaged> flag to <{unmanaged_flag}> for "
+                                              f"{[spec.service_name() for spec in specs]}")
+        else:
+            return HandleCommandResult(stdout=f"No specs found with the <service_name> -> {service_name}")
+
     @_cli_write_command(
         'orch apply osd',
-        'name=all_available_devices,type=CephBool,req=false',
+        'name=all_available_devices,type=CephBool,req=false '
+        'name=preview,type=CephBool,req=false '
+        'name=service_name,type=CephString,req=false '
+        'name=unmanaged,type=CephBool,req=false '
+        "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false",
         'Create OSD daemon(s) using a drive group spec')
-    def _apply_osd(self, all_available_devices=False, inbuf=None):
-        # type: (bool, Optional[str]) -> HandleCommandResult
+    def _apply_osd(self,
+                   all_available_devices: bool = False,
+                   preview: bool = False,
+                   service_name: Optional[str] = None,
+                   unmanaged: Optional[bool] = None,
+                   format: Optional[str] = 'plain',
+                   inbuf: Optional[str] = None) -> HandleCommandResult:
         """Apply DriveGroupSpecs to create OSDs"""
         usage = """
 Usage:
   ceph orch apply osd -i <json_file/yaml_file>
   ceph orch apply osd --use-all-devices
+  ceph orch apply osd --service-name <service_name> --preview
+  ceph orch apply osd --service-name <service_name> --unmanaged=True|False
 """
+
+        def print_preview(prev, format):
+            if format != 'plain':
+                return to_format(prev, format)
+            else:
+                table = PrettyTable(
+                    ['NAME', 'HOST', 'DATA', 'DB', 'WAL'],
+                    border=False)
+                table.align = 'l'
+                table.left_padding_width = 0
+                table.right_padding_width = 1
+                for data in prev:
+                    dg_name = data.get('drivegroup')
+                    hostname = data.get('host')
+                    for osd in data.get('data', {}).get('osds', []):
+                        db_path = '-'
+                        wal_path = '-'
+                        block_db = osd.get('block.db', {}).get('path')
+                        block_wal = osd.get('block.wal', {}).get('path')
+                        block_data = osd.get('data', {}).get('path', '')
+                        if not block_data:
+                            continue
+                        if block_db:
+                            db_path = data.get('data', {}).get('vg', {}).get('devices', [])
+                        if block_wal:
+                            wal_path = data.get('data', {}).get('wal_vg', {}).get('devices', [])
+                        table.add_row((dg_name, hostname, block_data, db_path, wal_path))
+                out = table.get_string()
+                if not out:
+                    out = "No pending deployments."
+                return out
+
+        if (inbuf or all_available_devices) and service_name:
+            # mutually exclusive
+            return HandleCommandResult(-errno.EINVAL, stderr=usage)
+
+        if preview and not (service_name or all_available_devices or inbuf):
+            # get all stored drivegroups and print
+            prev = self.preview_drivegroups()
+            return HandleCommandResult(stdout=print_preview(prev, format))
+
+        if service_name and preview:
+            # get specified drivegroup and print
+            prev = self.preview_drivegroups(service_name)
+            return HandleCommandResult(stdout=print_preview(prev, format))
+
+        if service_name and unmanaged is not None:
+            return self.set_unmanaged_flag(service_name, unmanaged)
+
         if not inbuf and not all_available_devices:
             return HandleCommandResult(-errno.EINVAL, stderr=usage)
         if inbuf:
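
Note: print_preview() consumes whatever preview_drivegroups() returns; the
.get() chains above imply a shape like the following (hypothetical values,
reconstructed here for illustration only):

  prev = [{
      'drivegroup': 'default_drive_group',          # NAME column
      'host': 'node1',
      'data': {
          'osds': [{
              'data': {'path': '/dev/sdb'},         # DATA column; rows without it are skipped
              'block.db': {'path': '/dev/nvme0n1p1'},
          }],
          'vg': {'devices': ['/dev/nvme0n1']},      # DB column when block.db is set
      },
  }]
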
@@ -454,7 +551,7 @@ Usage:
                 raise OrchestratorError('--all-available-devices cannot be combined with an osd spec')
             try:
                 drivegroups = yaml.load_all(inbuf)
-                dg_specs = [ServiceSpec.from_json(dg) for dg in drivegroups]
+                dg_specs = [DriveGroupSpec.from_json(dg) for dg in drivegroups]
             except ValueError as e:
                 msg = 'Failed to read JSON/YAML input: {}'.format(str(e)) + usage
                 return HandleCommandResult(-errno.EINVAL, stderr=msg)
@@ -467,10 +564,12 @@ Usage:
                 )
             ]
 
-        completion = self.apply_drivegroups(dg_specs)
-        self._orchestrator_wait([completion])
-        raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
+        if not preview:
+            completion = self.apply_drivegroups(dg_specs)
+            self._orchestrator_wait([completion])
+            raise_if_exception(completion)
+        ret = self.preview_drivegroups(dg_specs=dg_specs)
+        return HandleCommandResult(stdout=print_preview(ret, format))
 
     @_cli_write_command(
         'orch daemon add osd',
@@ -571,6 +670,8 @@ Usage:
             completion = self.add_node_exporter(spec)
         elif daemon_type == 'prometheus':
             completion = self.add_prometheus(spec)
+        elif daemon_type == 'iscsi':
+            completion = self.add_iscsi(spec)
         else:
             raise OrchestratorValidationError(f'unknown daemon type `{daemon_type}`')
 
@@ -595,11 +696,11 @@ Usage:
 
     @_cli_write_command(
         'orch daemon add rgw',
-        'name=realm_name,type=CephString '
-        'name=zone_name,type=CephString '
+        'name=realm_name,type=CephString,req=false '
+        'name=zone_name,type=CephString,req=false '
         'name=placement,type=CephString,req=false',
         'Start RGW daemon(s)')
-    def _rgw_add(self, realm_name, zone_name, placement=None, inbuf=None):
+    def _rgw_add(self, realm_name=None, zone_name=None, placement=None, inbuf=None):
         usage = """
 Usage:
   ceph orch daemon rgw add -i <json_file>
@@ -611,17 +712,52 @@ Usage:
             except ValueError as e:
                 msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
                 return HandleCommandResult(-errno.EINVAL, stderr=msg)
-        rgw_spec = RGWSpec(
-            rgw_realm=realm_name,
-            rgw_zone=zone_name,
-            placement=PlacementSpec.from_string(placement),
-        )
+        elif realm_name and zone_name:
+            rgw_spec = RGWSpec(
+                rgw_realm=realm_name,
+                rgw_zone=zone_name,
+                placement=PlacementSpec.from_string(placement))
+        else:
+            return HandleCommandResult(-errno.EINVAL, stderr=usage)
 
         completion = self.add_rgw(rgw_spec)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
+    @_cli_write_command(
+        'orch daemon add iscsi',
+        'name=pool,type=CephString '
+        'name=fqdn_enabled,type=CephString,req=false '
+        'name=trusted_ip_list,type=CephString,req=false '
+        'name=placement,type=CephString,req=false',
+        'Start iscsi daemon(s)')
+    def _iscsi_add(self, pool, fqdn_enabled=None, trusted_ip_list=None, placement=None, inbuf=None):
+        usage = """
+        Usage:
+          ceph orch daemon add iscsi -i <json_file>
+          ceph orch daemon add iscsi <pool>
+                """
+        if inbuf:
+            try:
+                iscsi_spec = IscsiServiceSpec.from_json(json.loads(inbuf))
+            except ValueError as e:
+                msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
+                return HandleCommandResult(-errno.EINVAL, stderr=msg)
+        else:
+            iscsi_spec = IscsiServiceSpec(
+                service_id='iscsi',
+                pool=pool,
+                fqdn_enabled=fqdn_enabled,
+                trusted_ip_list=trusted_ip_list,
+                placement=PlacementSpec.from_string(placement),
+            )
+
+        completion = self.add_iscsi(iscsi_spec)
+        self._orchestrator_wait([completion])
+        raise_if_exception(completion)
+        return HandleCommandResult(stdout=completion.result_str())
+
     @_cli_write_command(
         'orch daemon add nfs',
         "name=svc_arg,type=CephString "
@@ -697,17 +833,6 @@ Usage:
         raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
-    @_cli_write_command(
-        'orch spec dump',
-        'name=service_name,type=CephString,req=false',
-        desc='List all Service specs')
-    def _get_service_specs(self, service_name=None):
-        completion = self.list_specs(service_name=service_name)
-        self._orchestrator_wait([completion])
-        raise_if_exception(completion)
-        specs = completion.result
-        return HandleCommandResult(stdout=yaml.safe_dump_all(specs))
-
     @_cli_write_command(
         'orch apply',
         'name=service_type,type=CephChoices,strings=mon|mgr|rbd-mirror|crash|alertmanager|grafana|node-exporter|prometheus,req=false '
@@ -917,12 +1042,28 @@ Usage:
         c = TrivialReadCompletion(result=True)
         assert c.has_result
 
+    @staticmethod
+    def _upgrade_check_image_name(image, ceph_version):
+        """
+        >>> OrchestratorCli._upgrade_check_image_name('v15.2.0', None)
+        Traceback (most recent call last):
+        orchestrator._interface.OrchestratorValidationError: Error: unable to pull image name `v15.2.0`.
+          Maybe you meant `--ceph-version 15.2.0`?
+
+        """
+        if image and re.match(r'^v?\d+\.\d+\.\d+$', image) and ceph_version is None:
+            ver = image[1:] if image.startswith('v') else image
+            s =  f"Error: unable to pull image name `{image}`.\n" \
+                 f"  Maybe you meant `--ceph-version {ver}`?"
+            raise OrchestratorValidationError(s)
+
     @_cli_write_command(
         'orch upgrade check',
         'name=image,type=CephString,req=false '
         'name=ceph_version,type=CephString,req=false',
         desc='Check service versions vs available and target containers')
     def _upgrade_check(self, image=None, ceph_version=None):
+        self._upgrade_check_image_name(image, ceph_version)
         completion = self.upgrade_check(image=image, version=ceph_version)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
@@ -950,6 +1091,7 @@ Usage:
         'name=ceph_version,type=CephString,req=false',
         desc='Initiate upgrade')
     def _upgrade_start(self, image=None, ceph_version=None):
+        self._upgrade_check_image_name(image, ceph_version)
         completion = self.upgrade_start(image, ceph_version)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
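
Note: the _upgrade_check_image_name() guard wired into both commands above
only fires when the image argument is a bare release string; anything carrying
a registry or tag passes through untouched. An illustrative check of the
pattern:

  import re
  pattern = r'^v?\d+\.\d+\.\d+$'
  for image in ('v15.2.0', '15.2.0', 'ceph/ceph:v15.2.0', 'quay.io/ceph/ceph:latest'):
      print(image, bool(re.match(pattern, image)))
  # v15.2.0 True                  -> rejected, hints at --ceph-version 15.2.0
  # 15.2.0 True                   -> rejected, hints at --ceph-version 15.2.0
  # ceph/ceph:v15.2.0 False       -> accepted as an image name
  # quay.io/ceph/ceph:latest False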