import ceph quincy 17.2.4
diff --git a/ceph/src/pybind/mgr/cephadm/serve.py b/ceph/src/pybind/mgr/cephadm/serve.py
index 207f3c6ba49f68a4795cd073a52793f7abe1f283..6ca94393ed2360b8cc58fb1a5cc4d705700352fe 100644
--- a/ceph/src/pybind/mgr/cephadm/serve.py
+++ b/ceph/src/pybind/mgr/cephadm/serve.py
@@ -1,3 +1,4 @@
+import ipaddress
 import hashlib
 import json
 import logging
@@ -162,7 +163,7 @@ class CephadmServe:
                 )
                 ret, out, err = self.mgr.mon_command({
                     'prefix': 'config set',
-                    'who': f'osd/host:{host}',
+                    'who': f'osd/host:{host.split(".")[0]}',
                     'name': 'osd_memory_target',
                     'value': str(val),
                 })
@@ -171,11 +172,15 @@ class CephadmServe:
                         f'Unable to set osd_memory_target on {host} to {val}: {err}'
                     )
         else:
-            self.mgr.check_mon_command({
-                'prefix': 'config rm',
-                'who': f'osd/host:{host}',
-                'name': 'osd_memory_target',
-            })
+            # if osd memory autotuning is off, we don't want to remove these config
+            # options as users may be using them. Since there is no way to set autotuning
+            # on/off at a host level, best we can do is check if it is globally on.
+            if self.mgr.get_foreign_ceph_option('osd', 'osd_memory_target_autotune'):
+                self.mgr.check_mon_command({
+                    'prefix': 'config rm',
+                    'who': f'osd/host:{host.split(".")[0]}',
+                    'name': 'osd_memory_target',
+                })
         self.mgr.cache.update_autotune(host)
 
     def _refresh_hosts_and_daemons(self) -> None:
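The two hunks above switch the config target from the FQDN to the short hostname (`host.split(".")[0]`), and only clear `osd_memory_target` when `osd_memory_target_autotune` is globally on, per the new comment. A minimal sketch of how the per-host mask is assembled (host name and value are made-up examples, not taken from the commit):

    host = 'ceph-node1.example.com'
    who = f'osd/host:{host.split(".")[0]}'   # -> 'osd/host:ceph-node1'
    cmd = {
        'prefix': 'config set',
        'who': who,
        'name': 'osd_memory_target',
        'value': str(4 * 1024**3),           # example value: 4 GiB
    }
    # cmd would then be handed to mon_command(), as in the hunk above.
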
@@ -515,6 +520,7 @@ class CephadmServe:
                                             len(self.mgr.apply_spec_fails),
                                             warnings)
         self.mgr.update_watched_hosts()
+        self.mgr.tuned_profile_utils._write_all_tuned_profiles()
         return r
 
     def _apply_service_config(self, spec: ServiceSpec) -> None:
@@ -603,14 +609,19 @@ class CephadmServe:
 
         def matches_network(host):
             # type: (str) -> bool
-            # make sure we have 1 or more IPs for any of those networks on that
-            # host
-            for network in public_networks:
-                if len(self.mgr.cache.networks[host].get(network, [])) > 0:
-                    return True
+            # make sure the host has at least one network that belongs to some configured public network(s)
+            for pn in public_networks:
+                public_network = ipaddress.ip_network(pn)
+                for hn in self.mgr.cache.networks[host]:
+                    host_network = ipaddress.ip_network(hn)
+                    if host_network.overlaps(public_network):
+                        return True
+
+            host_networks = ','.join(self.mgr.cache.networks[host])
+            pub_networks = ','.join(public_networks)
             self.log.info(
-                f"Filtered out host {host}: does not belong to mon public_network"
-                f" ({','.join(public_networks)})"
+                f"Filtered out host {host}: does not belong to mon public_network(s): "
+                f" {pub_networks}, host network(s): {host_networks}"
             )
             return False
 
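The rewritten matches_network() compares whole networks instead of counting per-network IPs on the host; it relies on ipaddress.ip_network() and the overlaps() check from the standard library. A minimal sketch of those semantics (the subnets are made-up examples):

    import ipaddress

    public = ipaddress.ip_network('10.1.0.0/16')
    # True: the host network shares addresses with the public network
    print(public.overlaps(ipaddress.ip_network('10.1.2.0/24')))
    # False: disjoint ranges, so this host would be filtered out
    print(public.overlaps(ipaddress.ip_network('192.168.0.0/24')))
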
@@ -622,6 +633,7 @@ class CephadmServe:
             hosts=self.mgr.cache.get_non_draining_hosts() if spec.service_name(
             ) == 'agent' else self.mgr.cache.get_schedulable_hosts(),
             unreachable_hosts=self.mgr.cache.get_unreachable_hosts(),
+            draining_hosts=self.mgr.cache.get_draining_hosts(),
             daemons=daemons,
             networks=self.mgr.cache.networks,
             filter_new_host=(
@@ -1000,6 +1012,7 @@ class CephadmServe:
                     spec=ServiceSpec('mon', placement=pspec),
                     hosts=self.mgr.cache.get_schedulable_hosts(),
                     unreachable_hosts=self.mgr.cache.get_unreachable_hosts(),
+                    draining_hosts=self.mgr.cache.get_draining_hosts(),
                     daemons=[],
                     networks=self.mgr.cache.networks,
                 )
@@ -1030,6 +1043,7 @@ class CephadmServe:
                     spec=ServiceSpec('mon', placement=ks.placement),
                     hosts=self.mgr.cache.get_schedulable_hosts(),
                     unreachable_hosts=self.mgr.cache.get_unreachable_hosts(),
+                    draining_hosts=self.mgr.cache.get_draining_hosts(),
                     daemons=[],
                     networks=self.mgr.cache.networks,
                 )
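The three hunks above thread draining_hosts through each HostAssignment call so the scheduler can tell hosts being drained apart from merely unreachable ones. A rough sketch of the kind of filtering this enables (not the actual scheduler logic; the function and its behaviour are assumptions for illustration):

    def candidate_hosts(schedulable, unreachable, draining):
        # Assumed behaviour: avoid placing new daemons on hosts that are
        # unreachable or currently being drained of their daemons.
        return [h for h in schedulable
                if h not in unreachable and h not in draining]

    print(candidate_hosts(['host1', 'host2', 'host3'],
                          unreachable=['host2'], draining=['host3']))  # ['host1']
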
@@ -1052,6 +1066,8 @@ class CephadmServe:
                             client_files: Dict[str, Dict[str, Tuple[int, int, int, bytes, str]]],
                             host: str) -> None:
         updated_files = False
+        if host in self.mgr.offline_hosts:
+            return
         old_files = self.mgr.cache.get_host_client_files(host).copy()
         for path, m in client_files.get(host, {}).items():
             mode, uid, gid, content, digest = m
@@ -1065,6 +1081,8 @@ class CephadmServe:
             self.mgr.cache.update_client_file(host, path, digest, mode, uid, gid)
             updated_files = True
         for path in old_files.keys():
+            if path == '/etc/ceph/ceph.conf':
+                continue
             self.log.info(f'Removing {host}:{path}')
             cmd = ['rm', '-f', path]
             self.mgr.ssh.check_execute_command(host, cmd)
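The hunk above makes the client-file writer skip offline hosts entirely and never delete /etc/ceph/ceph.conf when pruning files it previously distributed. The same keep-list pattern in isolation (paths below are examples):

    KEEP = {'/etc/ceph/ceph.conf'}
    previously_distributed = ['/etc/ceph/ceph.conf',
                              '/etc/ceph/ceph.client.foo.keyring']
    still_wanted = []   # nothing managed for this host any more
    for path in previously_distributed:
        if path in KEEP or path in still_wanted:
            continue
        print(f'would remove {path}')   # only the keyring would be removed
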
@@ -1125,6 +1143,12 @@ class CephadmServe:
                 except AttributeError:
                     eca = None
 
+                if daemon_spec.service_name in self.mgr.spec_store:
+                    configs = self.mgr.spec_store[daemon_spec.service_name].spec.custom_configs
+                    if configs is not None:
+                        daemon_spec.final_config.update(
+                            {'custom_config_files': [c.to_json() for c in configs]})
+
                 if self.mgr.cache.host_needs_registry_login(daemon_spec.host) and self.mgr.registry_url:
                     await self._registry_login(daemon_spec.host, json.loads(str(self.mgr.get_store('registry_credentials'))))
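The last hunk merges any custom_configs defined on the daemon's service spec into the final config handed to deployment, serialized via to_json(). A sketch of the resulting shape (only the 'custom_config_files' key comes from the hunk; the entry's field names are an assumption for illustration):

    final_config = {}                       # stands in for daemon_spec.final_config
    configs_json = [
        # assumed fields of one serialized custom config entry
        {'mount_path': '/etc/example/example.conf', 'content': 'key = value\n'},
    ]
    final_config.update({'custom_config_files': configs_json})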