# ceph/qa/tasks/mgr/mgr_test_case.py
import json
import logging

from unittest import SkipTest

from teuthology import misc
from tasks.ceph_test_case import CephTestCase

# TODO move definition of CephCluster away from the CephFS stuff
from tasks.cephfs.filesystem import CephCluster


log = logging.getLogger(__name__)


class MgrCluster(CephCluster):
    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if len(self.mgr_ids) == 0:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        self.mgr_daemons = dict(
            [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id))
             for mgr_id in self.mgr_ids])

    def mgr_stop(self, mgr_id):
        self.mgr_daemons[mgr_id].stop()

    def mgr_fail(self, mgr_id):
        self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)

    def mgr_restart(self, mgr_id):
        self.mgr_daemons[mgr_id].restart()

    def get_mgr_map(self):
        return json.loads(
            self.mon_manager.raw_cluster_cmd("mgr", "dump",
                                             "--format=json-pretty"))

    def get_active_id(self):
        return self.get_mgr_map()["active_name"]

    def get_standby_ids(self):
        return [s['name'] for s in self.get_mgr_map()["standbys"]]

    def set_module_conf(self, module, key, val):
        self.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                         "mgr/{0}/{1}".format(module, key),
                                         val)

    def set_module_localized_conf(self, module, mgr_id, key, val, force):
        cmd = ["config", "set", "mgr",
               "/".join(["mgr", module, mgr_id, key]),
               val]
        if force:
            cmd.append("--force")
        self.mon_manager.raw_cluster_cmd(*cmd)
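
# For reference, a call like set_module_localized_conf("dashboard", "x",
# "server_port", "7789", force=True) builds and runs the equivalent of the
# CLI command below ("dashboard"/"server_port" are illustrative values,
# not defaults):
#
#   ceph config set mgr mgr/dashboard/x/server_port 7789 --force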


class MgrTestCase(CephTestCase):
    MGRS_REQUIRED = 1

    @classmethod
    def setup_mgrs(cls):
        # Stop all the daemons
        for daemon in cls.mgr_cluster.mgr_daemons.values():
            daemon.stop()

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_fail(mgr_id)

        # Unload all non-default plugins
        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        unload_modules = set(loaded) - {"restful"}

        for m in unload_modules:
            cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                "mgr", "module", "disable", m)

        # Start all the daemons
        for daemon in cls.mgr_cluster.mgr_daemons.values():
            daemon.restart()

        # Wait for an active to come up
        cls.wait_until_true(lambda: cls.mgr_cluster.get_active_id() != "",
                            timeout=20)

        expect_standbys = set(cls.mgr_cluster.mgr_ids) \
            - {cls.mgr_cluster.get_active_id()}
        cls.wait_until_true(
            lambda: set(cls.mgr_cluster.get_standby_ids()) == expect_standbys,
            timeout=20)

    @classmethod
    def setUpClass(cls):
        # The test runner should have populated this
        assert cls.mgr_cluster is not None

        if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
            raise SkipTest(
                "Only have {0} manager daemons, {1} are required".format(
                    len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))

    @classmethod
    def _unload_module(cls, module_name):
        def is_disabled():
            enabled_modules = json.loads(
                cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                    'mgr', 'module', 'ls'))['enabled_modules']
            return module_name not in enabled_modules

        if is_disabled():
            return

        log.info("Unloading Mgr module %s ...", module_name)
        cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            'mgr', 'module', 'disable', module_name)
        cls.wait_until_true(is_disabled, timeout=30)

    @classmethod
    def _load_module(cls, module_name):
        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        if module_name in loaded:
            # The enable command is idempotent, but our wait for a restart
            # isn't, so let's return now if it's already loaded
            return

        initial_mgr_map = cls.mgr_cluster.get_mgr_map()

        # check if the module is configured as an always on module
        mgr_daemons = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "metadata"))

        for daemon in mgr_daemons:
            if daemon["name"] == initial_mgr_map["active_name"]:
                ceph_version = daemon["ceph_release"]
                always_on = initial_mgr_map["always_on_modules"].get(
                    ceph_version, [])
                if module_name in always_on:
                    return

        log.info("Loading Mgr module %s ...", module_name)
        initial_gid = initial_mgr_map['active_gid']
        cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "enable", module_name, "--force")

        # Wait for the module to load
        def has_restarted():
            mgr_map = cls.mgr_cluster.get_mgr_map()
            done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
            if done:
                log.info("Restarted after module load (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done

        cls.wait_until_true(has_restarted, timeout=30)
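
    # Note: "always_on_modules" in the mgr map is keyed by release name,
    # e.g. (illustrative) {"nautilus": ["balancer", "crash", ...]}, which
    # is why _load_module looks up the active daemon's "ceph_release"
    # before deciding whether an enable is needed at all.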

    @classmethod
    def _get_uri(cls, service_name):
        # Little dict hack so that I can assign into this from
        # the get_or_none function
        mgr_map = {'x': None}

        def _get_or_none():
            mgr_map['x'] = cls.mgr_cluster.get_mgr_map()
            result = mgr_map['x']['services'].get(service_name, None)
            return result

        cls.wait_until_true(lambda: _get_or_none() is not None, 30)

        uri = mgr_map['x']['services'][service_name]

        log.info("Found {0} at {1} (daemon {2}/{3})".format(
            service_name, uri, mgr_map['x']['active_name'],
            mgr_map['x']['active_gid']))

        return uri
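
    # The "services" map is only populated once a module begins serving,
    # e.g. (illustrative) {"dashboard": "https://mgr-host:8443/"}, which is
    # why _get_uri polls for the entry instead of reading the map once.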

    @classmethod
    def _assign_ports(cls, module_name, config_name, min_port=7789):
        """
        To avoid the need to run lots of hosts in teuthology tests to
        get different URLs per mgr, we will hand out different ports
        to each mgr here.

        This is already taken care of for us when running in a vstart
        environment.
        """
        # Start handing out ports well above Ceph's range.
        assign_port = min_port

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_stop(mgr_id)
            cls.mgr_cluster.mgr_fail(mgr_id)

        for mgr_id in cls.mgr_cluster.mgr_ids:
            log.info("Using port {0} for {1} on mgr.{2}".format(
                assign_port, module_name, mgr_id))
            cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
                                                      config_name,
                                                      str(assign_port),
                                                      force=True)
            assign_port += 1

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_restart(mgr_id)

        def is_available():
            mgr_map = cls.mgr_cluster.get_mgr_map()
            done = mgr_map['available']
            if done:
                log.info("Available after assign ports (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done

        cls.wait_until_true(is_available, timeout=30)
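

# A minimal usage sketch (illustrative, not part of the original file):
# a hypothetical subclass wiring the helpers together. The class name,
# module name and "server_port" option are assumptions for illustration.
#
#   class TestMyModule(MgrTestCase):
#       MGRS_REQUIRED = 2
#
#       def setUp(self):
#           super(TestMyModule, self).setUp()
#           self.setup_mgrs()
#           # One distinct port per mgr, then enable and locate the module
#           self._assign_ports("mymodule", "server_port", min_port=7789)
#           self._load_module("mymodule")
#           self.uri = self._get_uri("mymodule")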