]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/mgr/dashboard/test_pool.py
bump version to 18.2.2-pve1
[ceph.git] / ceph / qa / tasks / mgr / dashboard / test_pool.py
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 import logging
5 import time
6 from contextlib import contextmanager
7
8 from .helper import DashboardTestCase, JAny, JList, JObj, JUnion
9
10 log = logging.getLogger(__name__)
11
12
class PoolTest(DashboardTestCase):
    """Dashboard REST API tests for the /api/pool endpoints."""

    # Role granted to the test user created by DashboardTestCase.
    AUTH_ROLES = ['pool-manager']

    # Minimal shape of one pool object returned by GET /api/pool;
    # additional keys are tolerated (allow_unknown=True).
    pool_schema = JObj(sub_elems={
        'pool_name': str,
        'type': str,
        'application_metadata': JList(str),
        'flags': int,
        'flags_names': str,
    }, allow_unknown=True)

    # Shape of a single statistic: latest sample, current rate and the
    # raw series of rate samples.
    pool_list_stat_schema = JObj(sub_elems={
        'latest': JUnion([int, float]),
        'rate': float,
        'rates': JList(JAny(none=False)),
    })

    # Shape of the 'stats' sub-object attached to pools when ?stats=true.
    pool_list_stats_schema = JObj(sub_elems={
        'avail_raw': pool_list_stat_schema,
        'bytes_used': pool_list_stat_schema,
        'max_avail': pool_list_stat_schema,
        'percent_used': pool_list_stat_schema,
        'rd_bytes': pool_list_stat_schema,
        'wr_bytes': pool_list_stat_schema,
        'rd': pool_list_stat_schema,
        'wr': pool_list_stat_schema,
    }, allow_unknown=True)

    # Shape of the RBD QoS configuration list of a pool.
    pool_rbd_conf_schema = JList(JObj(sub_elems={
        'name': str,
        'value': str,
        'source': int
    }))
46
47 @contextmanager
48 def __yield_pool(self, name=None, data=None, deletion_name=None):
49 """
50 Use either just a name or whole description of a pool to create one.
51 This also validates the correct creation and deletion after the pool was used.
52
53 :param name: Name of the pool
54 :param data: Describes the pool in full length
55 :param deletion_name: Only needed if the pool was renamed
56 :return:
57 """
58 data = self._create_pool(name, data)
59 yield data
60 self._delete_pool(deletion_name or data['pool'])
61
62 def _create_pool(self, name, data):
63 data = data or {
64 'pool': name,
65 'pg_num': '32',
66 'pool_type': 'replicated',
67 'compression_algorithm': 'snappy',
68 'compression_mode': 'passive',
69 'compression_max_blob_size': '131072',
70 'compression_required_ratio': '0.875',
71 'application_metadata': ['rbd'],
72 'configuration': {
73 'rbd_qos_bps_limit': 1024000,
74 'rbd_qos_iops_limit': 5000,
75 }
76 }
77 self._task_post('/api/pool/', data)
78 self.assertStatus(201)
79 self._validate_pool_properties(data, self._get_pool(data['pool']))
80 return data
81
82 def _delete_pool(self, name):
83 self._task_delete('/api/pool/' + name)
84 self.assertStatus(204)
85
    def _validate_pool_properties(self, data, pool, timeout=DashboardTestCase.TIMEOUT_HEALTH_CLEAR):
        """Assert that every property in ``data`` is reflected in ``pool``.

        :param data: dict of request properties as they were sent to the API
        :param pool: pool object as returned by GET /api/pool/<name>
        :param timeout: seconds to wait for cluster health to clear afterwards
        """
        # pylint: disable=too-many-branches
        for prop, value in data.items():
            if prop == 'pool_type':
                # Request key 'pool_type' maps to response key 'type'.
                self.assertEqual(pool['type'], value)
            elif prop == 'size':
                # Sent as a string, returned as an int.
                self.assertEqual(pool[prop], int(value),
                                 '{}: {} != {}'.format(prop, pool[prop], value))
            elif prop == 'pg_num':
                # pg_num convergence is asynchronous; poll via the helper.
                self._check_pg_num(pool['pool_name'], int(value))
            elif prop == 'application_metadata':
                self.assertIsInstance(pool[prop], list)
                self.assertEqual(value, pool[prop])
            elif prop == 'pool':
                # Request key 'pool' maps to response key 'pool_name'.
                self.assertEqual(pool['pool_name'], value)
            elif prop.startswith('compression'):
                if value is not None:
                    # Sizes/ratios are sent as strings but stored numerically
                    # in the pool's options.
                    if prop.endswith('size'):
                        value = int(value)
                    elif prop.endswith('ratio'):
                        value = float(value)
                    self.assertEqual(pool['options'][prop], value)
                else:
                    # A None compression value means the options were unset.
                    self.assertEqual(pool['options'], {})
            elif prop == 'configuration':
                # configuration cannot really be checked here for two reasons:
                # 1. The default value cannot be given to this method, which becomes relevant
                #    when resetting a value, because it's not always zero.
                # 2. The expected `source` cannot be given to this method, and it cannot
                #    reliably be determined (see 1)
                pass
            else:
                self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value))

        # Property changes may briefly put the cluster into HEALTH_WARN.
        self.wait_for_health_clear(timeout)
121
122 def _get_pool(self, pool_name):
123 pool = self._get("/api/pool/" + pool_name)
124 self.assertStatus(200)
125 self.assertSchemaBody(self.pool_schema)
126 return pool
127
128 def _check_pg_num(self, pool_name, pg_num):
129 """
130 If both properties have not the same value, the cluster goes into a warning state, which
131 will only happen during a pg update on an existing pool. The test that does that is
132 currently commented out because our QA systems can't deal with the change. Feel free to test
133 it locally.
134 """
135 self.wait_until_equal(
136 lambda: self._get_pool(pool_name)['pg_placement_num'],
137 expect_val=pg_num,
138 timeout=180
139 )
140
141 pool = self._get_pool(pool_name)
142
143 for prop in ['pg_num', 'pg_placement_num']:
144 self.assertEqual(pool[prop], int(pg_num),
145 '{}: {} != {}'.format(prop, pool[prop], pg_num))
146
147 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
148 def test_read_access_permissions(self):
149 self._get('/api/pool')
150 self.assertStatus(403)
151 self._get('/api/pool/bla')
152 self.assertStatus(403)
153
154 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}])
155 def test_create_access_permissions(self):
156 self._task_post('/api/pool/', {})
157 self.assertStatus(403)
158
159 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}])
160 def test_delete_access_permissions(self):
161 self._delete('/api/pool/ddd')
162 self.assertStatus(403)
163
164 def test_pool_configuration(self):
165 pool_name = '.mgr'
166 data = self._get('/api/pool/{}/configuration'.format(pool_name))
167 self.assertStatus(200)
168 self.assertSchema(data, JList(JObj({
169 'name': str,
170 'value': str,
171 'source': int
172 })))
173
174 def test_pool_list(self):
175 data = self._get("/api/pool")
176 self.assertStatus(200)
177
178 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
179 self.assertEqual(len(cluster_pools), len(data))
180 self.assertSchemaBody(JList(self.pool_schema))
181 for pool in data:
182 self.assertNotIn('pg_status', pool)
183 self.assertNotIn('stats', pool)
184 self.assertIn(pool['pool_name'], cluster_pools)
185
186 def test_pool_list_attrs(self):
187 data = self._get("/api/pool?attrs=type,flags")
188 self.assertStatus(200)
189
190 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
191 self.assertEqual(len(cluster_pools), len(data))
192 for pool in data:
193 self.assertIn('pool_name', pool)
194 self.assertIn('type', pool)
195 self.assertIn('flags', pool)
196 self.assertNotIn('flags_names', pool)
197 self.assertNotIn('pg_status', pool)
198 self.assertNotIn('stats', pool)
199 self.assertIn(pool['pool_name'], cluster_pools)
200
201 def test_pool_list_stats(self):
202 data = self._get("/api/pool?stats=true")
203 self.assertStatus(200)
204
205 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
206 self.assertEqual(len(cluster_pools), len(data))
207 self.assertSchemaBody(JList(self.pool_schema))
208 for pool in data:
209 self.assertIn('pool_name', pool)
210 self.assertIn('type', pool)
211 self.assertIn('application_metadata', pool)
212 self.assertIn('flags', pool)
213 self.assertIn('pg_status', pool)
214 self.assertSchema(pool['stats'], self.pool_list_stats_schema)
215 self.assertIn('flags_names', pool)
216 self.assertIn(pool['pool_name'], cluster_pools)
217
218 def test_pool_get(self):
219 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
220 pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats"
221 .format(cluster_pools[0]))
222 self.assertEqual(pool['pool_name'], cluster_pools[0])
223 self.assertIn('type', pool)
224 self.assertIn('flags', pool)
225 self.assertNotIn('pg_status', pool)
226 self.assertSchema(pool['stats'], self.pool_list_stats_schema)
227 self.assertNotIn('flags_names', pool)
228 self.assertSchema(pool['configuration'], self.pool_rbd_conf_schema)
229
230 def test_pool_create_with_two_applications(self):
231 self.__yield_pool(None, {
232 'pool': 'dashboard_pool1',
233 'pg_num': '32',
234 'pool_type': 'replicated',
235 'application_metadata': ['rbd', 'sth'],
236 })
237
238 def test_pool_create_with_ecp_and_rule(self):
239 self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
240 self._ceph_cmd(
241 ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd'])
242 self.__yield_pool(None, {
243 'pool': 'dashboard_pool2',
244 'pg_num': '32',
245 'pool_type': 'erasure',
246 'application_metadata': ['rbd'],
247 'erasure_code_profile': 'ecprofile',
248 'crush_rule': 'ecrule',
249 })
250 self._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])
251
252 def test_pool_create_with_compression(self):
253 pool = {
254 'pool': 'dashboard_pool3',
255 'pg_num': '32',
256 'pool_type': 'replicated',
257 'compression_algorithm': 'zstd',
258 'compression_mode': 'aggressive',
259 'compression_max_blob_size': '10000000',
260 'compression_required_ratio': '0.8',
261 'application_metadata': ['rbd'],
262 'configuration': {
263 'rbd_qos_bps_limit': 2048,
264 'rbd_qos_iops_limit': None,
265 },
266 }
267 with self.__yield_pool(None, pool):
268 expected_configuration = [{
269 'name': 'rbd_qos_bps_limit',
270 'source': 1,
271 'value': '2048',
272 }, {
273 'name': 'rbd_qos_iops_limit',
274 'source': 0,
275 'value': '0',
276 }]
277 new_pool = self._get_pool(pool['pool'])
278 for conf in expected_configuration:
279 self.assertIn(conf, new_pool['configuration'])
280
281 def test_pool_create_with_quotas(self):
282 pools = [
283 {
284 'pool_data': {
285 'pool': 'dashboard_pool_quota1',
286 'pg_num': '32',
287 'pool_type': 'replicated',
288 'application_metadata': ['rbd'],
289 },
290 'pool_quotas_to_check': {
291 'quota_max_objects': 0,
292 'quota_max_bytes': 0,
293 }
294 },
295 {
296 'pool_data': {
297 'pool': 'dashboard_pool_quota2',
298 'pg_num': '32',
299 'pool_type': 'replicated',
300 'application_metadata': ['rbd'],
301 'quota_max_objects': 1024,
302 'quota_max_bytes': 1000,
303 },
304 'pool_quotas_to_check': {
305 'quota_max_objects': 1024,
306 'quota_max_bytes': 1000,
307 }
308 }
309 ]
310
311 for pool in pools:
312 pool_name = pool['pool_data']['pool']
313 with self.__yield_pool(pool_name, pool['pool_data']):
314 self._validate_pool_properties(pool['pool_quotas_to_check'],
315 self._get_pool(pool_name))
316
317 def test_pool_update_name(self):
318 name = 'pool_update'
319 updated_name = 'pool_updated_name'
320 with self.__yield_pool(name, None, updated_name):
321 props = {'pool': updated_name}
322 self._task_put('/api/pool/{}'.format(name), props)
323 time.sleep(5)
324 self.assertStatus(200)
325 self._validate_pool_properties(props, self._get_pool(updated_name))
326
327 def test_pool_update_metadata(self):
328 pool_name = 'pool_update_metadata'
329 with self.__yield_pool(pool_name):
330 props = {'application_metadata': ['rbd', 'sth']}
331 self._task_put('/api/pool/{}'.format(pool_name), props)
332 self._validate_pool_properties(props, self._get_pool(pool_name),
333 self.TIMEOUT_HEALTH_CLEAR * 2)
334
335 properties = {'application_metadata': ['rgw']}
336 self._task_put('/api/pool/' + pool_name, properties)
337 self._validate_pool_properties(properties, self._get_pool(pool_name),
338 self.TIMEOUT_HEALTH_CLEAR * 2)
339
340 properties = {'application_metadata': ['rbd', 'sth']}
341 self._task_put('/api/pool/' + pool_name, properties)
342 self._validate_pool_properties(properties, self._get_pool(pool_name),
343 self.TIMEOUT_HEALTH_CLEAR * 2)
344
345 properties = {'application_metadata': ['rgw']}
346 self._task_put('/api/pool/' + pool_name, properties)
347 self._validate_pool_properties(properties, self._get_pool(pool_name),
348 self.TIMEOUT_HEALTH_CLEAR * 2)
349
350 def test_pool_update_configuration(self):
351 pool_name = 'pool_update_configuration'
352 with self.__yield_pool(pool_name):
353 configuration = {
354 'rbd_qos_bps_limit': 1024,
355 'rbd_qos_iops_limit': None,
356 }
357 expected_configuration = [{
358 'name': 'rbd_qos_bps_limit',
359 'source': 1,
360 'value': '1024',
361 }, {
362 'name': 'rbd_qos_iops_limit',
363 'source': 0,
364 'value': '0',
365 }]
366 self._task_put('/api/pool/' + pool_name, {'configuration': configuration})
367 time.sleep(5)
368 pool_config = self._get_pool(pool_name)['configuration']
369 for conf in expected_configuration:
370 self.assertIn(conf, pool_config)
371
372 def test_pool_update_compression(self):
373 pool_name = 'pool_update_compression'
374 with self.__yield_pool(pool_name):
375 properties = {
376 'compression_algorithm': 'zstd',
377 'compression_mode': 'aggressive',
378 'compression_max_blob_size': '10000000',
379 'compression_required_ratio': '0.8',
380 }
381 self._task_put('/api/pool/' + pool_name, properties)
382 time.sleep(5)
383 self._validate_pool_properties(properties, self._get_pool(pool_name))
384
385 def test_pool_update_unset_compression(self):
386 pool_name = 'pool_update_unset_compression'
387 with self.__yield_pool(pool_name):
388 self._task_put('/api/pool/' + pool_name, {'compression_mode': 'unset'})
389 time.sleep(5)
390 self._validate_pool_properties({
391 'compression_algorithm': None,
392 'compression_mode': None,
393 'compression_max_blob_size': None,
394 'compression_required_ratio': None,
395 }, self._get_pool(pool_name))
396
397 def test_pool_update_quotas(self):
398 pool_name = 'pool_update_quotas'
399 with self.__yield_pool(pool_name):
400 properties = {
401 'quota_max_objects': 1024,
402 'quota_max_bytes': 1000,
403 }
404 self._task_put('/api/pool/' + pool_name, properties)
405 time.sleep(5)
406 self._validate_pool_properties(properties, self._get_pool(pool_name))
407
408 def test_pool_create_fail(self):
409 data = {'pool_type': u'replicated', 'rule_name': u'dnf', 'pg_num': u'8', 'pool': u'sadfs'}
410 self._task_post('/api/pool/', data)
411 self.assertStatus(400)
412 self.assertJsonBody({
413 'component': 'pool',
414 'code': "2",
415 'detail': "[errno -2] specified rule dnf doesn't exist"
416 })
417
418 def test_pool_info(self):
419 self._get("/ui-api/pool/info")
420 self.assertSchemaBody(JObj({
421 'pool_names': JList(str),
422 'compression_algorithms': JList(str),
423 'compression_modes': JList(str),
424 'is_all_bluestore': bool,
425 'bluestore_compression_algorithm': str,
426 'osd_count': int,
427 'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
428 'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
429 'pg_autoscale_default_mode': str,
430 'pg_autoscale_modes': JList(str),
431 'erasure_code_profiles': JList(JObj({}, allow_unknown=True)),
432 'used_rules': JObj({}, allow_unknown=True),
433 'used_profiles': JObj({}, allow_unknown=True),
434 'nodes': JList(JObj({}, allow_unknown=True)),
435 }))