ceph/qa/tasks/mgr/dashboard/test_pool.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import logging
import time
from contextlib import contextmanager

from .helper import DashboardTestCase, JAny, JList, JObj, JUnion

log = logging.getLogger(__name__)


class PoolTest(DashboardTestCase):
    AUTH_ROLES = ['pool-manager']

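    # JSON schema fragments (helpers imported from `.helper`) used throughout this
    # class to validate the structure of the dashboard REST API responses;
    # `allow_unknown=True` tolerates additional keys in the returned objects.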
    pool_schema = JObj(sub_elems={
        'pool_name': str,
        'type': str,
        'application_metadata': JList(str),
        'flags': int,
        'flags_names': str,
    }, allow_unknown=True)

    pool_list_stat_schema = JObj(sub_elems={
        'latest': JUnion([int, float]),
        'rate': float,
        'rates': JList(JAny(none=False)),
    })

    pool_list_stats_schema = JObj(sub_elems={
        'avail_raw': pool_list_stat_schema,
        'bytes_used': pool_list_stat_schema,
        'max_avail': pool_list_stat_schema,
        'percent_used': pool_list_stat_schema,
        'rd_bytes': pool_list_stat_schema,
        'wr_bytes': pool_list_stat_schema,
        'rd': pool_list_stat_schema,
        'wr': pool_list_stat_schema,
    }, allow_unknown=True)

    pool_rbd_conf_schema = JList(JObj(sub_elems={
        'name': str,
        'value': str,
        'source': int
    }))

    @contextmanager
    def __yield_pool(self, name=None, data=None, deletion_name=None):
        """
        Create a pool from either just a name or a full description.
        Creation is validated, and the pool is deleted again after it has been used.

        :param name: Name of the pool
        :param data: Full description of the pool
        :param deletion_name: Only needed if the pool was renamed
        """
        data = self._create_pool(name, data)
        yield data
        self._delete_pool(deletion_name or data['pool'])

    def _create_pool(self, name, data):
        data = data or {
            'pool': name,
            'pg_num': '32',
            'pool_type': 'replicated',
            'compression_algorithm': 'snappy',
            'compression_mode': 'passive',
            'compression_max_blob_size': '131072',
            'compression_required_ratio': '0.875',
            'application_metadata': ['rbd'],
            'configuration': {
                'rbd_qos_bps_limit': 1024000,
                'rbd_qos_iops_limit': 5000,
            }
        }
        self._task_post('/api/pool/', data)
        self.assertStatus(201)
        self._validate_pool_properties(data, self._get_pool(data['pool']))
        return data

    def _delete_pool(self, name):
        self._task_delete('/api/pool/' + name)
        self.assertStatus(204)

    def _validate_pool_properties(self, data, pool, timeout=DashboardTestCase.TIMEOUT_HEALTH_CLEAR):
        # pylint: disable=too-many-branches
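        # Compare each property of the request payload (`data`) against the pool
        # object returned by the dashboard API, translating names where the two
        # representations differ (e.g. 'pool' vs. 'pool_name', 'pool_type' vs. 'type').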
        for prop, value in data.items():
            if prop == 'pool_type':
                self.assertEqual(pool['type'], value)
            elif prop == 'size':
                self.assertEqual(pool[prop], int(value),
                                 '{}: {} != {}'.format(prop, pool[prop], value))
            elif prop == 'pg_num':
                self._check_pg_num(pool['pool_name'], int(value))
            elif prop == 'application_metadata':
                self.assertIsInstance(pool[prop], list)
                self.assertEqual(value, pool[prop])
            elif prop == 'pool':
                self.assertEqual(pool['pool_name'], value)
            elif prop.startswith('compression'):
                if value is not None:
                    if prop.endswith('size'):
                        value = int(value)
                    elif prop.endswith('ratio'):
                        value = float(value)
                    self.assertEqual(pool['options'][prop], value)
                else:
                    self.assertEqual(pool['options'], {})
            elif prop == 'configuration':
                # configuration cannot really be checked here for two reasons:
                # 1. The default value cannot be given to this method, which becomes
                #    relevant when resetting a value, because it's not always zero.
                # 2. The expected `source` cannot be given to this method, and it
                #    cannot reliably be determined (see 1).
                pass
            else:
                self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value))

        self.wait_for_health_clear(timeout)

    def _get_pool(self, pool_name):
        pool = self._get("/api/pool/" + pool_name)
        self.assertStatus(200)
        self.assertSchemaBody(self.pool_schema)
        return pool

    def _check_pg_num(self, pool_name, pg_num):
129 """
130 If both properties have not the same value, the cluster goes into a warning state, which
131 will only happen during a pg update on an existing pool. The test that does that is
132 currently commented out because our QA systems can't deal with the change. Feel free to test
133 it locally.
134 """
        self.wait_until_equal(
            lambda: self._get_pool(pool_name)['pg_placement_num'],
            expect_val=pg_num,
            timeout=180
        )

        pool = self._get_pool(pool_name)

        for prop in ['pg_num', 'pg_placement_num']:
            self.assertEqual(pool[prop], int(pg_num),
                             '{}: {} != {}'.format(prop, pool[prop], pg_num))

    @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
    def test_read_access_permissions(self):
        self._get('/api/pool')
        self.assertStatus(403)
        self._get('/api/pool/bla')
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}])
    def test_create_access_permissions(self):
        self._task_post('/api/pool/', {})
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}])
    def test_delete_access_permissions(self):
        self._delete('/api/pool/ddd')
        self.assertStatus(403)

    def test_pool_configuration(self):
        pool_name = '.mgr'
        data = self._get('/api/pool/{}/configuration'.format(pool_name))
        self.assertStatus(200)
        self.assertSchema(data, JList(JObj({
            'name': str,
            'value': str,
            'source': int
        })))

    def test_pool_list(self):
        data = self._get("/api/pool")
        self.assertStatus(200)

        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data))
        self.assertSchemaBody(JList(self.pool_schema))
        for pool in data:
            self.assertNotIn('pg_status', pool)
            self.assertNotIn('stats', pool)
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_pool_list_attrs(self):
        data = self._get("/api/pool?attrs=type,flags")
        self.assertStatus(200)

        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data))
        for pool in data:
            self.assertIn('pool_name', pool)
            self.assertIn('type', pool)
            self.assertIn('flags', pool)
            self.assertNotIn('flags_names', pool)
            self.assertNotIn('pg_status', pool)
            self.assertNotIn('stats', pool)
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_pool_list_stats(self):
        data = self._get("/api/pool?stats=true")
        self.assertStatus(200)

        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        self.assertEqual(len(cluster_pools), len(data))
        self.assertSchemaBody(JList(self.pool_schema))
        for pool in data:
            self.assertIn('pool_name', pool)
            self.assertIn('type', pool)
            self.assertIn('application_metadata', pool)
            self.assertIn('flags', pool)
            self.assertIn('pg_status', pool)
            self.assertSchema(pool['stats'], self.pool_list_stats_schema)
            self.assertIn('flags_names', pool)
            self.assertIn(pool['pool_name'], cluster_pools)

    def test_pool_get(self):
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats"
                         .format(cluster_pools[0]))
        self.assertEqual(pool['pool_name'], cluster_pools[0])
        self.assertIn('type', pool)
        self.assertIn('flags', pool)
        self.assertNotIn('pg_status', pool)
        self.assertSchema(pool['stats'], self.pool_list_stats_schema)
        self.assertNotIn('flags_names', pool)
        self.assertSchema(pool['configuration'], self.pool_rbd_conf_schema)

    def test_pool_create_with_two_applications(self):
        # The context manager must actually be entered; otherwise the pool would
        # never be created, validated or deleted.
        with self.__yield_pool(None, {
            'pool': 'dashboard_pool1',
            'pg_num': '32',
            'pool_type': 'replicated',
            'application_metadata': ['rbd', 'sth'],
        }):
            pass

    def test_pool_create_with_ecp_and_rule(self):
        self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
        self._ceph_cmd(
            ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd'])
        # As above, enter the context manager so the pool is created and removed
        # again before the erasure-code profile is deleted.
        with self.__yield_pool(None, {
            'pool': 'dashboard_pool2',
            'pg_num': '32',
            'pool_type': 'erasure',
            'application_metadata': ['rbd'],
            'erasure_code_profile': 'ecprofile',
            'crush_rule': 'ecrule',
        }):
            pass
        self._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])

    def test_pool_create_with_compression(self):
        pool = {
            'pool': 'dashboard_pool3',
            'pg_num': '32',
            'pool_type': 'replicated',
            'compression_algorithm': 'zstd',
            'compression_mode': 'aggressive',
            'compression_max_blob_size': '10000000',
            'compression_required_ratio': '0.8',
            'application_metadata': ['rbd'],
            'configuration': {
                'rbd_qos_bps_limit': 2048,
                'rbd_qos_iops_limit': None,
            },
        }
        with self.__yield_pool(None, pool):
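            # The dashboard reports where each RBD config value comes from: a
            # `source` of 1 is expected for the pool-level override, while passing
            # None removes the pool-level setting so the value falls back to the
            # global default (`source` 0).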
            expected_configuration = [{
                'name': 'rbd_qos_bps_limit',
                'source': 1,
                'value': '2048',
            }, {
                'name': 'rbd_qos_iops_limit',
                'source': 0,
                'value': '0',
            }]
            new_pool = self._get_pool(pool['pool'])
            for conf in expected_configuration:
                self.assertIn(conf, new_pool['configuration'])

    def test_pool_create_with_quotas(self):
        pools = [
            {
                'pool_data': {
                    'pool': 'dashboard_pool_quota1',
                    'pg_num': '32',
                    'pool_type': 'replicated',
                },
                'pool_quotas_to_check': {
                    'quota_max_objects': 0,
                    'quota_max_bytes': 0,
                }
            },
            {
                'pool_data': {
                    'pool': 'dashboard_pool_quota2',
                    'pg_num': '32',
                    'pool_type': 'replicated',
                    'quota_max_objects': 1024,
                    'quota_max_bytes': 1000,
                },
                'pool_quotas_to_check': {
                    'quota_max_objects': 1024,
                    'quota_max_bytes': 1000,
                }
            }
        ]

        for pool in pools:
            pool_name = pool['pool_data']['pool']
            with self.__yield_pool(pool_name, pool['pool_data']):
                self._validate_pool_properties(pool['pool_quotas_to_check'],
                                               self._get_pool(pool_name))

    def test_pool_update_name(self):
        name = 'pool_update'
        updated_name = 'pool_updated_name'
        with self.__yield_pool(name, None, updated_name):
            props = {'pool': updated_name}
            self._task_put('/api/pool/{}'.format(name), props)
            time.sleep(5)
            self.assertStatus(200)
            self._validate_pool_properties(props, self._get_pool(updated_name))

    def test_pool_update_metadata(self):
        pool_name = 'pool_update_metadata'
        with self.__yield_pool(pool_name):
            props = {'application_metadata': ['rbd', 'sth']}
            self._task_put('/api/pool/{}'.format(pool_name), props)
            self._validate_pool_properties(props, self._get_pool(pool_name),
                                           self.TIMEOUT_HEALTH_CLEAR * 2)

            properties = {'application_metadata': ['rgw']}
            self._task_put('/api/pool/' + pool_name, properties)
            self._validate_pool_properties(properties, self._get_pool(pool_name),
                                           self.TIMEOUT_HEALTH_CLEAR * 2)

            properties = {'application_metadata': ['rbd', 'sth']}
            self._task_put('/api/pool/' + pool_name, properties)
            self._validate_pool_properties(properties, self._get_pool(pool_name),
                                           self.TIMEOUT_HEALTH_CLEAR * 2)

            properties = {'application_metadata': ['rgw']}
            self._task_put('/api/pool/' + pool_name, properties)
            self._validate_pool_properties(properties, self._get_pool(pool_name),
                                           self.TIMEOUT_HEALTH_CLEAR * 2)

    def test_pool_update_configuration(self):
        pool_name = 'pool_update_configuration'
        with self.__yield_pool(pool_name):
            configuration = {
                'rbd_qos_bps_limit': 1024,
                'rbd_qos_iops_limit': None,
            }
            expected_configuration = [{
                'name': 'rbd_qos_bps_limit',
                'source': 1,
                'value': '1024',
            }, {
                'name': 'rbd_qos_iops_limit',
                'source': 0,
                'value': '0',
            }]
            self._task_put('/api/pool/' + pool_name, {'configuration': configuration})
            time.sleep(5)
            pool_config = self._get_pool(pool_name)['configuration']
            for conf in expected_configuration:
                self.assertIn(conf, pool_config)

    def test_pool_update_compression(self):
        pool_name = 'pool_update_compression'
        with self.__yield_pool(pool_name):
            properties = {
                'compression_algorithm': 'zstd',
                'compression_mode': 'aggressive',
                'compression_max_blob_size': '10000000',
                'compression_required_ratio': '0.8',
            }
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties, self._get_pool(pool_name))

    def test_pool_update_unset_compression(self):
        pool_name = 'pool_update_unset_compression'
        with self.__yield_pool(pool_name):
            self._task_put('/api/pool/' + pool_name, {'compression_mode': 'unset'})
            time.sleep(5)
            self._validate_pool_properties({
                'compression_algorithm': None,
                'compression_mode': None,
                'compression_max_blob_size': None,
                'compression_required_ratio': None,
            }, self._get_pool(pool_name))

    def test_pool_update_quotas(self):
        pool_name = 'pool_update_quotas'
        with self.__yield_pool(pool_name):
            properties = {
                'quota_max_objects': 1024,
                'quota_max_bytes': 1000,
            }
            self._task_put('/api/pool/' + pool_name, properties)
            time.sleep(5)
            self._validate_pool_properties(properties, self._get_pool(pool_name))

    def test_pool_create_fail(self):
        data = {'pool_type': u'replicated', 'rule_name': u'dnf', 'pg_num': u'8', 'pool': u'sadfs'}
        self._task_post('/api/pool/', data)
        self.assertStatus(400)
        self.assertJsonBody({
            'component': 'pool',
            'code': "2",
            'detail': "[errno -2] specified rule dnf doesn't exist"
        })

    def test_pool_info(self):
        self._get("/ui-api/pool/info")
        self.assertSchemaBody(JObj({
            'pool_names': JList(str),
            'compression_algorithms': JList(str),
            'compression_modes': JList(str),
            'is_all_bluestore': bool,
            'bluestore_compression_algorithm': str,
            'osd_count': int,
            'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
            'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
            'pg_autoscale_default_mode': str,
            'pg_autoscale_modes': JList(str),
            'erasure_code_profiles': JList(JObj({}, allow_unknown=True)),
            'used_rules': JObj({}, allow_unknown=True),
            'used_profiles': JObj({}, allow_unknown=True),
            'nodes': JList(JObj({}, allow_unknown=True)),
        }))