# ceph/qa/tasks/mgr/mgr_test_case.py (from ceph.git)
import json
import logging

from unittest import case

from teuthology import misc

from tasks.ceph_test_case import CephTestCase

# TODO move definition of CephCluster away from the CephFS stuff
from tasks.cephfs.filesystem import CephCluster


log = logging.getLogger(__name__)

class MgrCluster(CephCluster):
    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if len(self.mgr_ids) == 0:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        self.mgr_daemons = dict(
            [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id))
             for mgr_id in self.mgr_ids])

    def mgr_stop(self, mgr_id):
        self.mgr_daemons[mgr_id].stop()

    def mgr_fail(self, mgr_id):
        self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)

    def mgr_restart(self, mgr_id):
        self.mgr_daemons[mgr_id].restart()

    def get_mgr_map(self):
        status = json.loads(
            self.mon_manager.raw_cluster_cmd("status", "--format=json-pretty"))

        return status["mgrmap"]

    def get_active_id(self):
        return self.get_mgr_map()["active_name"]

    def get_standby_ids(self):
        return [s['name'] for s in self.get_mgr_map()["standbys"]]

    def set_module_conf(self, module, key, val):
        self.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                         "mgr/{0}/{1}".format(
                                             module, key
                                         ), val)

    def set_module_localized_conf(self, module, mgr_id, key, val):
        self.mon_manager.raw_cluster_cmd("config", "set", "mgr",
                                         "mgr/{0}/{1}/{2}".format(
                                             module, mgr_id, key
                                         ), val)

class MgrTestCase(CephTestCase):
    MGRS_REQUIRED = 1

    @classmethod
    def setup_mgrs(cls):
        # Stop all the daemons
        for daemon in cls.mgr_cluster.mgr_daemons.values():
            daemon.stop()

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_fail(mgr_id)

        # Unload all non-default plugins
        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        unload_modules = set(loaded) - {"restful"}

        for m in unload_modules:
            cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                "mgr", "module", "disable", m)

        # Start all the daemons
        for daemon in cls.mgr_cluster.mgr_daemons.values():
            daemon.restart()

        # Wait for an active to come up
        cls.wait_until_true(lambda: cls.mgr_cluster.get_active_id() != "",
                            timeout=20)

        expect_standbys = set(cls.mgr_cluster.mgr_ids) \
            - {cls.mgr_cluster.get_active_id()}
        cls.wait_until_true(
            lambda: set(cls.mgr_cluster.get_standby_ids()) == expect_standbys,
            timeout=20)

    @classmethod
    def setUpClass(cls):
        # The test runner should have populated this
        assert cls.mgr_cluster is not None

        if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
            raise case.SkipTest("Only have {0} manager daemons, "
                                "{1} are required".format(
                                    len(cls.mgr_cluster.mgr_ids),
                                    cls.MGRS_REQUIRED))

        cls.setup_mgrs()
    @classmethod
    def _load_module(cls, module_name):
        loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        if module_name in loaded:
            # The enable command is idempotent, but our wait for a restart
            # isn't, so let's return now if it's already loaded
            return

        initial_mgr_map = cls.mgr_cluster.get_mgr_map()

        # check if the module is configured as an always on module
        mgr_daemons = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "metadata"))

        for daemon in mgr_daemons:
            if daemon["name"] == initial_mgr_map["active_name"]:
                ceph_version = daemon["ceph_release"]
                always_on = initial_mgr_map["always_on_modules"].get(
                    ceph_version, [])
                if module_name in always_on:
                    return

        initial_gid = initial_mgr_map['active_gid']
        cls.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable",
                                                    module_name, "--force")

        # Wait for the module to load
        def has_restarted():
            mgr_map = cls.mgr_cluster.get_mgr_map()
            done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
            if done:
                log.info("Restarted after module load (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done
        cls.wait_until_true(has_restarted, timeout=30)
    @classmethod
    def _get_uri(cls, service_name):
        # Little dict hack so that I can assign into this from
        # the get_or_none function
        mgr_map = {'x': None}

        def _get_or_none():
            mgr_map['x'] = cls.mgr_cluster.get_mgr_map()
            result = mgr_map['x']['services'].get(service_name, None)
            return result

        cls.wait_until_true(lambda: _get_or_none() is not None, 30)

        uri = mgr_map['x']['services'][service_name]

        log.info("Found {0} at {1} (daemon {2}/{3})".format(
            service_name, uri, mgr_map['x']['active_name'],
            mgr_map['x']['active_gid']))

        return uri
    @classmethod
    def _assign_ports(cls, module_name, config_name, min_port=7789):
        """
        To avoid the need to run lots of hosts in teuthology tests to
        get different URLs per mgr, we will hand out different ports
        to each mgr here.

        This is already taken care of for us when running in a vstart
        environment.
        """
        # Start handing out ports well above Ceph's range.
        assign_port = min_port

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_stop(mgr_id)
            cls.mgr_cluster.mgr_fail(mgr_id)

        for mgr_id in cls.mgr_cluster.mgr_ids:
            log.info("Using port {0} for {1} on mgr.{2}".format(
                assign_port, module_name, mgr_id
            ))
            cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
                                                      config_name,
                                                      str(assign_port))
            assign_port += 1

        for mgr_id in cls.mgr_cluster.mgr_ids:
            cls.mgr_cluster.mgr_restart(mgr_id)

        def is_available():
            mgr_map = cls.mgr_cluster.get_mgr_map()
            done = mgr_map['available']
            if done:
                log.info("Available after assign ports (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done
        cls.wait_until_true(is_available, timeout=30)
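

# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the upstream file): a
# typical module test subclasses MgrTestCase, declares how many mgr daemons it
# needs, and uses the helpers above to assign per-daemon ports, enable the
# module and discover its service URI.  The class name, the module name
# "restful", the option name "server_port" and the port 8003 are examples
# only; real tests live alongside this file in qa/tasks/mgr/.
class ExampleModuleTest(MgrTestCase):
    MGRS_REQUIRED = 2

    def setUp(self):
        super(ExampleModuleTest, self).setUp()
        # Hand each mgr its own port, enable the module, then wait for the
        # active mgr to publish a URI for the service.
        self._assign_ports("restful", "server_port", min_port=8003)
        self._load_module("restful")
        self.base_uri = self._get_uri("restful")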