]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/mgr/dashboard/test_health.py
update ceph source to reef 18.1.2
[ceph.git] / ceph / qa / tasks / mgr / dashboard / test_health.py
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 from .helper import (DashboardTestCase, JAny, JLeaf, JList, JObj,
5 addrvec_schema, module_options_schema)
6
7
class HealthTest(DashboardTestCase):
    """Integration tests for the dashboard ``/api/health`` endpoints.

    Verifies the JSON response schemas of the minimal and full health
    endpoints, and that a restricted role only receives the sections its
    permissions allow.
    """

    # Deploy a CephFS file system so fs_map contains at least one entry.
    CEPHFS = True

    # Schema of the 'pg_info' section, shared by the minimal and full
    # health responses.
    __pg_info_schema = JObj({
        'object_stats': JObj({
            'num_objects': int,
            'num_object_copies': int,
            'num_objects_degraded': int,
            'num_objects_misplaced': int,
            'num_objects_unfound': int
        }),
        'pgs_per_osd': float,
        'statuses': JObj({}, allow_unknown=True, unknown_schema=int)
    })

    # Schema of the 'mdsmap' object nested in fs_map['filesystems'].
    __mdsmap_schema = JObj({
        'session_autoclose': int,
        'balancer': str,
        'bal_rank_mask': str,
        'up': JObj({}, allow_unknown=True),
        'last_failure_osd_epoch': int,
        'in': JList(int),
        'last_failure': int,
        'max_file_size': int,
        'explicitly_allowed_features': int,
        'damaged': JList(int),
        'tableserver': int,
        'failed': JList(int),
        'metadata_pool': int,
        'epoch': int,
        'stopped': JList(int),
        'max_mds': int,
        'compat': JObj({
            'compat': JObj({}, allow_unknown=True),
            'ro_compat': JObj({}, allow_unknown=True),
            'incompat': JObj({}, allow_unknown=True)
        }),
        'required_client_features': JObj({}, allow_unknown=True),
        'data_pools': JList(int),
        'info': JObj({}, allow_unknown=True),
        'fs_name': str,
        'created': str,
        'standby_count_wanted': int,
        'enabled': bool,
        'modified': str,
        'session_timeout': int,
        'flags': int,
        'flags_state': JObj({
            'joinable': bool,
            'allow_snaps': bool,
            'allow_multimds_snaps': bool,
            'allow_standby_replay': bool,
            'refuse_client_session': bool
        }),
        'ever_allowed_features': int,
        'root': int
    })

    def _assert_pools_in_cluster(self, pools):
        """Assert the endpoint's pool list matches the cluster's pools.

        :param pools: the ``pools`` list from a health response; each
            entry is a dict containing at least ``pool_name``.
        """
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(pools))
        for pool in pools:
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_minimal_health(self):
        """GET /api/health/minimal returns the trimmed-down schema."""
        data = self._get('/api/health/minimal')
        self.assertStatus(200)
        schema = JObj({
            'client_perf': JObj({
                'read_bytes_sec': int,
                'read_op_per_sec': int,
                'recovering_bytes_per_sec': int,
                'write_bytes_sec': int,
                'write_op_per_sec': int
            }),
            'df': JObj({
                'stats': JObj({
                    'total_avail_bytes': int,
                    'total_bytes': int,
                    'total_used_raw_bytes': int,
                })
            }),
            'fs_map': JObj({
                'filesystems': JList(
                    JObj({
                        'mdsmap': self.__mdsmap_schema
                    }),
                ),
                'standbys': JList(JObj({}, allow_unknown=True)),
            }),
            'health': JObj({
                'checks': JList(JObj({}, allow_unknown=True)),
                'mutes': JList(JObj({}, allow_unknown=True)),
                'status': str,
            }),
            'hosts': int,
            'iscsi_daemons': JObj({
                'up': int,
                'down': int
            }),
            'mgr_map': JObj({
                'active_name': str,
                'standbys': JList(JLeaf(dict))
            }),
            'mon_status': JObj({
                'monmap': JObj({
                    'mons': JList(JLeaf(dict)),
                }),
                'quorum': JList(int)
            }),
            'osd_map': JObj({
                'osds': JList(
                    JObj({
                        'in': int,
                        'up': int,
                        'state': JList(str)
                    })),
            }),
            'pg_info': self.__pg_info_schema,
            'pools': JList(JLeaf(dict)),
            'rgw': int,
            'scrub_status': str
        })
        self.assertSchema(data, schema)

    def test_full_health(self):
        """GET /api/health/full returns the complete schema and a pool
        list consistent with the cluster's pools."""
        data = self._get('/api/health/full')
        self.assertStatus(200)
        module_info_schema = JObj({
            'can_run': bool,
            'error_string': str,
            'name': str,
            'module_options': module_options_schema
        })
        schema = JObj({
            'client_perf': JObj({
                'read_bytes_sec': int,
                'read_op_per_sec': int,
                'recovering_bytes_per_sec': int,
                'write_bytes_sec': int,
                'write_op_per_sec': int
            }),
            'df': JObj({
                'pools': JList(JObj({
                    'stats': JObj({
                        'stored': int,
                        'stored_data': int,
                        'stored_omap': int,
                        'objects': int,
                        'kb_used': int,
                        'bytes_used': int,
                        'data_bytes_used': int,
                        'omap_bytes_used': int,
                        'percent_used': float,
                        'max_avail': int,
                        'quota_objects': int,
                        'quota_bytes': int,
                        'dirty': int,
                        'rd': int,
                        'rd_bytes': int,
                        'wr': int,
                        'wr_bytes': int,
                        'compress_bytes_used': int,
                        'compress_under_bytes': int,
                        'stored_raw': int,
                        'avail_raw': int
                    }),
                    'name': str,
                    'id': int
                })),
                'stats': JObj({
                    'total_avail_bytes': int,
                    'total_bytes': int,
                    'total_used_bytes': int,
                    'total_used_raw_bytes': int,
                    'total_used_raw_ratio': float,
                    'num_osds': int,
                    'num_per_pool_osds': int,
                    'num_per_pool_omap_osds': int
                })
            }),
            'fs_map': JObj({
                'compat': JObj({
                    'compat': JObj({}, allow_unknown=True, unknown_schema=str),
                    'incompat': JObj(
                        {}, allow_unknown=True, unknown_schema=str),
                    'ro_compat': JObj(
                        {}, allow_unknown=True, unknown_schema=str)
                }),
                'default_fscid': int,
                'epoch': int,
                'feature_flags': JObj(
                    {}, allow_unknown=True, unknown_schema=bool),
                'filesystems': JList(
                    JObj({
                        'id': int,
                        'mdsmap': self.__mdsmap_schema
                    }),
                ),
                'standbys': JList(JObj({}, allow_unknown=True)),
            }),
            'health': JObj({
                'checks': JList(JObj({}, allow_unknown=True)),
                'mutes': JList(JObj({}, allow_unknown=True)),
                'status': str,
            }),
            'hosts': int,
            'iscsi_daemons': JObj({
                'up': int,
                'down': int
            }),
            'mgr_map': JObj({
                'active_addr': str,
                'active_addrs': JObj({
                    'addrvec': addrvec_schema
                }),
                'active_change': str,  # timestamp
                'active_mgr_features': int,
                'active_gid': int,
                'active_name': str,
                'always_on_modules': JObj({}, allow_unknown=True),
                'available': bool,
                'available_modules': JList(module_info_schema),
                'epoch': int,
                'modules': JList(str),
                'services': JObj(
                    {'dashboard': str},  # This module should always be present
                    allow_unknown=True, unknown_schema=str
                ),
                'standbys': JList(JObj({
                    'available_modules': JList(module_info_schema),
                    'gid': int,
                    'name': str,
                    'mgr_features': int
                }, allow_unknown=True))
            }, allow_unknown=True),
            'mon_status': JObj({
                'election_epoch': int,
                'extra_probe_peers': JList(JAny(none=True)),
                'feature_map': JObj(
                    {}, allow_unknown=True, unknown_schema=JList(JObj({
                        'features': str,
                        'num': int,
                        'release': str
                    }))
                ),
                'features': JObj({
                    'quorum_con': str,
                    'quorum_mon': JList(str),
                    'required_con': str,
                    'required_mon': JList(str)
                }),
                'monmap': JObj({
                    # @TODO: expand on monmap schema
                    'mons': JList(JLeaf(dict)),
                }, allow_unknown=True),
                'name': str,
                'outside_quorum': JList(int),
                'quorum': JList(int),
                'quorum_age': int,
                'rank': int,
                'state': str,
                # @TODO: What type should be expected here?
                'sync_provider': JList(JAny(none=True)),
                'stretch_mode': bool
            }),
            'osd_map': JObj({
                # @TODO: define schema for crush map and osd_metadata, among
                # others
                'osds': JList(
                    JObj({
                        'in': int,
                        'up': int,
                    }, allow_unknown=True)),
            }, allow_unknown=True),
            'pg_info': self.__pg_info_schema,
            'pools': JList(JLeaf(dict)),
            'rgw': int,
            'scrub_status': str
        })
        self.assertSchema(data, schema)

        self._assert_pools_in_cluster(data['pools'])

    @DashboardTestCase.RunAs('test', 'test', ['pool-manager'])
    def test_health_permissions(self):
        """A pool-manager role sees only the permitted health sections."""
        data = self._get('/api/health/full')
        self.assertStatus(200)

        schema = JObj({
            'client_perf': JObj({}, allow_unknown=True),
            'df': JObj({}, allow_unknown=True),
            'health': JObj({
                'checks': JList(JObj({}, allow_unknown=True)),
                'mutes': JList(JObj({}, allow_unknown=True)),
                'status': str
            }),
            'pools': JList(JLeaf(dict)),
        })
        self.assertSchema(data, schema)

        self._assert_pools_in_cluster(data['pools'])