# ceph/qa/tasks/mgr/mgr_test_case.py
from unittest import case

import json
import logging

from teuthology import misc
from tasks.ceph_test_case import CephTestCase

# TODO move definition of CephCluster away from the CephFS stuff
from tasks.cephfs.filesystem import CephCluster


log = logging.getLogger(__name__)


class MgrCluster(CephCluster):
    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if len(self.mgr_ids) == 0:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        self.mgr_daemons = dict(
            [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id))
             for mgr_id in self.mgr_ids])
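
    # For illustration (an assumption about teuthology role naming, not
    # something stated in this file): given roles such as 'mgr.x' and
    # 'mgr.y', all_roles_of_type() yields the id parts, so mgr_ids would
    # be ['x', 'y'].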

    def mgr_stop(self, mgr_id):
        self.mgr_daemons[mgr_id].stop()

    def mgr_fail(self, mgr_id):
        self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)

    def mgr_restart(self, mgr_id):
        self.mgr_daemons[mgr_id].restart()

    def get_mgr_map(self):
        status = json.loads(
            self.mon_manager.raw_cluster_cmd("status", "--format=json-pretty"))

        return status["mgrmap"]

    def get_active_id(self):
        return self.get_mgr_map()["active_name"]

    def get_standby_ids(self):
        return [s['name'] for s in self.get_mgr_map()["standbys"]]

    def set_module_conf(self, module, key, val):
        self.mon_manager.raw_cluster_cmd("config-key", "set",
                                         "mgr/{0}/{1}".format(module, key),
                                         val)

    def set_module_localized_conf(self, module, mgr_id, key, val):
        self.mon_manager.raw_cluster_cmd("config-key", "set",
                                         "mgr/{0}/{1}/{2}".format(
                                             module, mgr_id, key),
                                         val)


class MgrTestCase(CephTestCase):
    MGRS_REQUIRED = 1

    def setUp(self):
        super(MgrTestCase, self).setUp()

        # The test runner should have populated this
        assert self.mgr_cluster is not None

        if len(self.mgr_cluster.mgr_ids) < self.MGRS_REQUIRED:
            raise case.SkipTest("Only have {0} manager daemons, "
                                "{1} are required".format(
                                    len(self.mgr_cluster.mgr_ids),
                                    self.MGRS_REQUIRED))

        # Restart all the daemons
        for daemon in self.mgr_cluster.mgr_daemons.values():
            daemon.stop()

        for mgr_id in self.mgr_cluster.mgr_ids:
            self.mgr_cluster.mgr_fail(mgr_id)

        for daemon in self.mgr_cluster.mgr_daemons.values():
            daemon.restart()

        # Wait for an active to come up
        self.wait_until_true(lambda: self.mgr_cluster.get_active_id() != "",
                             timeout=20)

        expect_standbys = set(self.mgr_cluster.mgr_ids) \
            - {self.mgr_cluster.get_active_id()}
        self.wait_until_true(
            lambda: set(self.mgr_cluster.get_standby_ids()) == expect_standbys,
            timeout=20)

    def _load_module(self, module_name):
        loaded = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
            "mgr", "module", "ls"))['enabled_modules']
        if module_name in loaded:
            # The enable command is idempotent, but our wait for a restart
            # isn't, so let's return now if it's already loaded
            return

        initial_gid = self.mgr_cluster.get_mgr_map()['active_gid']
        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable",
                                                     module_name)

        # Wait for the module to load
        def has_restarted():
            mgr_map = self.mgr_cluster.get_mgr_map()
            done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
            if done:
                log.info("Restarted after module load (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done
        self.wait_until_true(has_restarted, timeout=30)
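
    # Note: enabling a module makes the active mgr respawn, so a change in
    # active_gid (together with the map reporting available=true) is what
    # signals that the newly enabled module has actually been loaded.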

    def _get_uri(self, service_name):
        # Little dict hack so that I can assign into this from
        # the get_or_none function
        mgr_map = {'x': None}

        def _get_or_none():
            mgr_map['x'] = self.mgr_cluster.get_mgr_map()
            result = mgr_map['x']['services'].get(service_name, None)
            return result

        self.wait_until_true(lambda: _get_or_none() is not None, 30)

        uri = mgr_map['x']['services'][service_name]

        log.info("Found {0} at {1} (daemon {2}/{3})".format(
            service_name, uri, mgr_map['x']['active_name'],
            mgr_map['x']['active_gid']))

        return uri
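
    # The dict wrapper exists because a Python 2 closure cannot rebind a
    # name in the enclosing scope; mutating a dict works around that. On
    # Python 3 the same thing could be written with `nonlocal`, e.g.:
    #
    #     latest_map = None
    #     def _get_or_none():
    #         nonlocal latest_map
    #         latest_map = self.mgr_cluster.get_mgr_map()
    #         return latest_map['services'].get(service_name, None)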

    def _assign_ports(self, module_name, config_name, min_port=7789):
        """
        To avoid the need to run lots of hosts in teuthology tests to
        get different URLs per mgr, we will hand out different ports
        to each mgr here.

        This is already taken care of for us when running in a vstart
        environment.
        """
        # Start handing out ports well above Ceph's range.
        assign_port = min_port

        for mgr_id in self.mgr_cluster.mgr_ids:
            self.mgr_cluster.mgr_stop(mgr_id)
            self.mgr_cluster.mgr_fail(mgr_id)

        for mgr_id in self.mgr_cluster.mgr_ids:
            log.info("Using port {0} for {1} on mgr.{2}".format(
                assign_port, module_name, mgr_id))
            self.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
                                                       config_name,
                                                       str(assign_port))
            assign_port += 1

        for mgr_id in self.mgr_cluster.mgr_ids:
            self.mgr_cluster.mgr_restart(mgr_id)

        def is_available():
            mgr_map = self.mgr_cluster.get_mgr_map()
            done = mgr_map['available']
            if done:
                log.info("Available after assign ports (new active {0}/{1})".format(
                    mgr_map['active_name'], mgr_map['active_gid']))
            return done
        self.wait_until_true(is_available, timeout=30)
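

# A minimal usage sketch: the module name "example" and its port option
# "server_port" are illustrative assumptions, not taken from this file.
# A concrete test builds on MgrTestCase by assigning per-mgr ports before
# enabling the module, then resolving the URI the active mgr publishes.
class TestExampleModule(MgrTestCase):
    MGRS_REQUIRED = 2

    def setUp(self):
        super(TestExampleModule, self).setUp()
        # Hand a distinct port to every mgr, enable the module, then wait
        # for the active mgr to publish the module's service URI.
        self._assign_ports("example", "server_port")
        self._load_module("example")
        self.example_uri = self._get_uri("example")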