import json
import logging

from unittest import SkipTest

from teuthology import misc
from tasks.ceph_test_case import CephTestCase

# TODO move definition of CephCluster away from the CephFS stuff
from tasks.cephfs.filesystem import CephCluster


log = logging.getLogger(__name__)


class MgrCluster(CephCluster):
    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if len(self.mgr_ids) == 0:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        self.mgr_daemons = dict(
            [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id)) for mgr_id
             in self.mgr_ids])

    def mgr_stop(self, mgr_id):
        self.mgr_daemons[mgr_id].stop()

    def mgr_fail(self, mgr_id):
        self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)

    def mgr_restart(self, mgr_id):
        self.mgr_daemons[mgr_id].restart()

    def get_mgr_map(self):
        return json.loads(
            self.mon_manager.raw_cluster_cmd("mgr", "dump", "--format=json-pretty"))

    def get_active_id(self):
        return self.get_mgr_map()["active_name"]

    def get_standby_ids(self):
        return [s['name'] for s in self.get_mgr_map()["standbys"]]

    def set_module_conf(self, module, key, val):
        self.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                         "mgr/{0}/{1}".format(
                                             module, key
                                         ), val)

    def set_module_localized_conf(self, module, mgr_id, key, val, force):
        cmd = ["config", "set", "mgr",
               "/".join(["mgr", module, mgr_id, key]),
               val]
        if force:
            cmd.append("--force")
        self.mon_manager.raw_cluster_cmd(*cmd)
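
# Illustrative usage only (not part of the upstream file): the localized config
# key is built by joining the pieces with "/", so a hypothetical call such as
#
#     mgr_cluster.set_module_localized_conf("dashboard", "x", "server_port",
#                                           "7789", force=True)
#
# ends up running `ceph config set mgr mgr/dashboard/x/server_port 7789 --force`.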


class MgrTestCase(CephTestCase):
    MGRS_REQUIRED = 1

    @classmethod
    def setup_mgrs(cls):
        # Stop all the daemons
        for daemon in cls.mgr_cluster.mgr_daemons.values():
            daemon.stop()

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_fail(mgr_id)

        # Unload all non-default plugins
        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        unload_modules = set(loaded) - {"cephadm", "restful"}

        for m in unload_modules:
            cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                "mgr", "module", "disable", m)

        # Start all the daemons
        for daemon in cls.mgr_cluster.mgr_daemons.values():
            daemon.restart()

        # Wait for an active to come up
        cls.wait_until_true(lambda: cls.mgr_cluster.get_active_id() != "",
                            timeout=20)

        expect_standbys = set(cls.mgr_cluster.mgr_ids) \
                          - {cls.mgr_cluster.get_active_id()}
        cls.wait_until_true(
            lambda: set(cls.mgr_cluster.get_standby_ids()) == expect_standbys,
            timeout=20)

    @classmethod
    def setUpClass(cls):
        # The test runner should have populated this
        assert cls.mgr_cluster is not None

        if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
            raise SkipTest(
                "Only have {0} manager daemons, {1} are required".format(
                    len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))

        cls.setup_mgrs()
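
    # A minimal sketch of how this base class is typically used (class and test
    # names below are illustrative, not taken from this file): a concrete test
    # declares how many mgr daemons it needs and lets setUpClass() skip or
    # prepare the cluster accordingly.
    #
    #     class TestMgrFailover(MgrTestCase):
    #         MGRS_REQUIRED = 2
    #
    #         def test_standby_takeover(self):
    #             original_active = self.mgr_cluster.get_active_id()
    #             self.mgr_cluster.mgr_fail(original_active)
    #             self.wait_until_true(
    #                 lambda: self.mgr_cluster.get_active_id()
    #                         not in ("", original_active),
    #                 timeout=60)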

    @classmethod
    def _unload_module(cls, module_name):
        def is_disabled():
            enabled_modules = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                'mgr', 'module', 'ls'))['enabled_modules']
            return module_name not in enabled_modules

        if is_disabled():
            return

        log.info("Unloading Mgr module %s ...", module_name)
        cls.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'module', 'disable', module_name)
        cls.wait_until_true(is_disabled, timeout=30)

    @classmethod
    def _load_module(cls, module_name):
        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        if module_name in loaded:
            # The enable command is idempotent, but our wait for a restart
            # isn't, so let's return now if it's already loaded
            return

        initial_mgr_map = cls.mgr_cluster.get_mgr_map()

        # check if the module is configured as an always-on module
        mgr_daemons = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "metadata"))

        for daemon in mgr_daemons:
            if daemon["name"] == initial_mgr_map["active_name"]:
                ceph_version = daemon["ceph_release"]
                always_on = initial_mgr_map["always_on_modules"].get(ceph_version, [])
                if module_name in always_on:
                    return

        log.info("Loading Mgr module %s ...", module_name)
        initial_gid = initial_mgr_map['active_gid']
        cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "enable", module_name, "--force")

        # Wait for the module to load
        def has_restarted():
            mgr_map = cls.mgr_cluster.get_mgr_map()
            done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
            if done:
                log.info("Restarted after module load (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done
        cls.wait_until_true(has_restarted, timeout=30)
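
    # Illustrative usage (an assumption, not taken from this file): tests
    # typically enable the module under test once during setup and rely on
    # has_restarted() above to confirm the active mgr came back with it loaded,
    # e.g. for the selftest module:
    #
    #     cls._load_module("selftest")
    #     cls._unload_module("selftest")   # no-op if it is already disabled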

    @classmethod
    def _get_uri(cls, service_name):
        # Little dict hack so that I can assign into this from
        # the get_or_none function
        mgr_map = {'x': None}

        def _get_or_none():
            mgr_map['x'] = cls.mgr_cluster.get_mgr_map()
            result = mgr_map['x']['services'].get(service_name, None)
            return result

        cls.wait_until_true(lambda: _get_or_none() is not None, 30)

        uri = mgr_map['x']['services'][service_name]

        log.info("Found {0} at {1} (daemon {2}/{3})".format(
            service_name, uri, mgr_map['x']['active_name'],
            mgr_map['x']['active_gid']))

        return uri
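
    # For illustration (assumed example, not part of the upstream file): any
    # module that registers a service URI with the mgr can be located by name,
    # e.g. the dashboard's HTTP endpoint:
    #
    #     uri = cls._get_uri("dashboard")   # e.g. "https://<host>:<port>/"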

    @classmethod
    def _assign_ports(cls, module_name, config_name, min_port=7789):
        """
        To avoid the need to run lots of hosts in teuthology tests to
        get different URLs per mgr, we will hand out different ports
        to each mgr here.

        This is already taken care of for us when running in a vstart
        environment.
        """
        # Start handing out ports well above Ceph's range.
        assign_port = min_port

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_stop(mgr_id)
            cls.mgr_cluster.mgr_fail(mgr_id)

        for mgr_id in cls.mgr_cluster.mgr_ids:
            log.info("Using port {0} for {1} on mgr.{2}".format(
                assign_port, module_name, mgr_id
            ))
            cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
                                                      config_name,
                                                      str(assign_port),
                                                      force=True)
            assign_port += 1

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_restart(mgr_id)

        def is_available():
            mgr_map = cls.mgr_cluster.get_mgr_map()
            done = mgr_map['available']
            if done:
                log.info("Available after assign ports (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done
        cls.wait_until_true(is_available, timeout=30)
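
    # Sketch of a typical call sequence (parameter values are illustrative
    # assumptions, not taken from this file): give each mgr its own port for
    # the module's port option before enabling it, then look up the resulting
    # service URI:
    #
    #     cls._assign_ports("dashboard", "ssl_server_port", min_port=7789)
    #     cls._load_module("dashboard")
    #     uri = cls._get_uri("dashboard")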