]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | |
2 | from unittest import case | |
3 | import json | |
4 | ||
5 | from teuthology import misc | |
6 | from tasks.ceph_test_case import CephTestCase | |
7 | ||
8 | # TODO move definition of CephCluster | |
9 | from tasks.cephfs.filesystem import CephCluster | |
10 | ||
11 | ||
class MgrCluster(CephCluster):
    """Convenience wrapper around the ceph-mgr daemons of a teuthology run.

    Discovers every role of type 'mgr' in the cluster at construction time
    and exposes start/stop/fail helpers plus mgrmap queries via the mons.
    """

    def __init__(self, ctx):
        super(MgrCluster, self).__init__(ctx)
        self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr'))

        if not self.mgr_ids:
            raise RuntimeError(
                "This task requires at least one manager daemon")

        # Map each mgr id to its daemon handle for direct process control.
        self.mgr_daemons = {
            mgr_id: self._ctx.daemons.get_daemon('mgr', mgr_id)
            for mgr_id in self.mgr_ids
        }

    def mgr_stop(self, mgr_id):
        """Stop the daemon process for *mgr_id*."""
        self.mgr_daemons[mgr_id].stop()

    def mgr_fail(self, mgr_id):
        """Tell the monitors to mark *mgr_id* as failed."""
        self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id)

    def mgr_restart(self, mgr_id):
        """Restart the daemon process for *mgr_id*."""
        self.mgr_daemons[mgr_id].restart()

    def get_mgr_map(self):
        """Return the 'mgrmap' section of `ceph status` (parsed JSON)."""
        raw = self.mon_manager.raw_cluster_cmd("status",
                                               "--format=json-pretty")
        status = json.loads(raw)
        return status["mgrmap"]

    def get_active_id(self):
        """Return the name of the currently active mgr ('' if none)."""
        return self.get_mgr_map()["active_name"]

    def get_standby_ids(self):
        """Return the names of all standby mgrs."""
        mgr_map = self.get_mgr_map()
        return [standby['name'] for standby in mgr_map["standbys"]]
45 | ||
46 | ||
class MgrTestCase(CephTestCase):
    """Base class for ceph-mgr tests.

    setUp() skips the test when fewer than MGRS_REQUIRED mgr daemons are
    available, then bounces every mgr so each test begins from a clean
    active/standby state.
    """

    # Subclasses override this to demand more daemons.
    MGRS_REQUIRED = 1

    def setUp(self):
        super(MgrTestCase, self).setUp()

        # The test runner should have populated this.  Raise explicitly
        # rather than `assert`, which is stripped under `python -O`.
        if self.mgr_cluster is None:
            raise RuntimeError("mgr_cluster was not populated by the runner")

        if len(self.mgr_cluster.mgr_ids) < self.MGRS_REQUIRED:
            # self.skipTest is the supported unittest API; the
            # `from unittest import case` / `case.SkipTest` spelling
            # goes through a private module and is deprecated.
            self.skipTest("Only have {0} manager daemons, "
                          "{1} are required".format(
                              len(self.mgr_cluster.mgr_ids),
                              self.MGRS_REQUIRED))

        # Restart all the daemons: stop every process, tell the mons to
        # fail each mgr (so stale map entries are cleared), then restart.
        for daemon in self.mgr_cluster.mgr_daemons.values():
            daemon.stop()

        for mgr_id in self.mgr_cluster.mgr_ids:
            self.mgr_cluster.mgr_fail(mgr_id)

        for daemon in self.mgr_cluster.mgr_daemons.values():
            daemon.restart()

        # Wait for an active mgr to come up.
        self.wait_until_true(lambda: self.mgr_cluster.get_active_id() != "",
                             timeout=20)

        # Everyone who isn't active should settle into standby.
        expect_standbys = set(self.mgr_cluster.mgr_ids) \
            - {self.mgr_cluster.get_active_id()}
        self.wait_until_true(
            lambda: set(self.mgr_cluster.get_standby_ids()) == expect_standbys,
            timeout=20)