]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/mgr/dashboard/test_pool.py
969318d2a94b3b79496cf1396cf7e992ecd05093
[ceph.git] / ceph / qa / tasks / mgr / dashboard / test_pool.py
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 import logging
5 import six
6 import time
7 from contextlib import contextmanager
8
9 from .helper import DashboardTestCase, JAny, JList, JObj
10
11 log = logging.getLogger(__name__)
12
13
class PoolTest(DashboardTestCase):
    """End-to-end tests for the dashboard pool REST endpoints (/api/pool)."""

    # Roles granted to the default test user; individual tests override this
    # via the RunAs decorator to exercise permission handling.
    AUTH_ROLES = ['pool-manager']

    # Minimal shape every pool object returned by the API must satisfy.
    pool_schema = JObj(sub_elems={
        'pool_name': str,
        'type': str,
        'application_metadata': JList(str),
        'flags': int,
        'flags_names': str,
    }, allow_unknown=True)

    # Shape of a single per-counter stat entry (latest value, rate, history).
    pool_list_stat_schema = JObj(sub_elems={
        'latest': int,
        'rate': float,
        'rates': JList(JAny(none=False)),
    })

    # Shape of the 'stats' sub-object attached when ?stats=true is requested.
    pool_list_stats_schema = JObj(sub_elems={
        'bytes_used': pool_list_stat_schema,
        'max_avail': pool_list_stat_schema,
        'rd_bytes': pool_list_stat_schema,
        'wr_bytes': pool_list_stat_schema,
        'rd': pool_list_stat_schema,
        'wr': pool_list_stat_schema,
    }, allow_unknown=True)

    # Shape of the RBD configuration entries returned in 'configuration'.
    pool_rbd_conf_schema = JList(JObj(sub_elems={
        'name': str,
        'value': str,
        'source': int
    }))
45
46 @contextmanager
47 def __yield_pool(self, name=None, data=None, deletion_name=None):
48 """
49 Use either just a name or whole description of a pool to create one.
50 This also validates the correct creation and deletion after the pool was used.
51
52 :param name: Name of the pool
53 :param data: Describes the pool in full length
54 :param deletion_name: Only needed if the pool was renamed
55 :return:
56 """
57 data = self._create_pool(name, data)
58 yield data
59 self._delete_pool(deletion_name or data['pool'])
60
61 def _create_pool(self, name, data):
62 data = data or {
63 'pool': name,
64 'pg_num': '32',
65 'pool_type': 'replicated',
66 'compression_algorithm': 'snappy',
67 'compression_mode': 'passive',
68 'compression_max_blob_size': '131072',
69 'compression_required_ratio': '0.875',
70 'application_metadata': ['rbd'],
71 'configuration': {
72 'rbd_qos_bps_limit': 1024000,
73 'rbd_qos_iops_limit': 5000,
74 }
75 }
76 self._task_post('/api/pool/', data)
77 self.assertStatus(201)
78 self._validate_pool_properties(data, self._get_pool(data['pool']))
79 return data
80
81 def _delete_pool(self, name):
82 self._task_delete('/api/pool/' + name)
83 self.assertStatus(204)
84
85 def _validate_pool_properties(self, data, pool):
86 for prop, value in data.items():
87 if prop == 'pool_type':
88 self.assertEqual(pool['type'], value)
89 elif prop == 'size':
90 self.assertEqual(pool[prop], int(value),
91 '{}: {} != {}'.format(prop, pool[prop], value))
92 elif prop == 'pg_num':
93 self._check_pg_num(value, pool)
94 elif prop == 'application_metadata':
95 self.assertIsInstance(pool[prop], list)
96 self.assertEqual(value, pool[prop])
97 elif prop == 'pool':
98 self.assertEqual(pool['pool_name'], value)
99 elif prop.startswith('compression'):
100 if value is not None:
101 if prop.endswith('size'):
102 value = int(value)
103 elif prop.endswith('ratio'):
104 value = float(value)
105 self.assertEqual(pool['options'][prop], value)
106 else:
107 self.assertEqual(pool['options'], {})
108 elif prop == 'configuration':
109 # configuration cannot really be checked here for two reasons:
110 # 1. The default value cannot be given to this method, which becomes relevant
111 # when resetting a value, because it's not always zero.
112 # 2. The expected `source` cannot be given to this method, and it cannot
113 # relibably be determined (see 1)
114 pass
115 else:
116 self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value))
117
118 health = self._get('/api/health/minimal')['health']
119 self.assertEqual(health['status'], 'HEALTH_OK', msg='health={}'.format(health))
120
121 def _get_pool(self, pool_name):
122 pool = self._get("/api/pool/" + pool_name)
123 self.assertStatus(200)
124 self.assertSchemaBody(self.pool_schema)
125 return pool
126
    def _check_pg_num(self, value, pool):
        """
        If both properties have not the same value, the cluster goes into a warning state, which
        will only happen during a pg update on an existing pool. The test that does that is
        currently commented out because our QA systems can't deal with the change. Feel free to test
        it locally.
        """
        pgp_prop = 'pg_placement_num'
        t = 0  # seconds waited so far; polling is capped at 180s
        # Poll until pg_placement_num has caught up with the requested pg_num
        # and the cluster reports HEALTH_OK again, re-fetching the pool each
        # round (2s between polls). Note the health check in the condition
        # performs an HTTP request on every evaluation.
        while (int(value) != pool[pgp_prop] or self._get('/api/health/minimal')['health']['status']
               != 'HEALTH_OK') and t < 180:
            time.sleep(2)
            t += 2
            pool = self._get_pool(pool['pool_name'])
        for p in ['pg_num', pgp_prop]:  # Should have the same values
            self.assertEqual(pool[p], int(value), '{}: {} != {}'.format(p, pool[p], value))
143
144 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
145 def test_read_access_permissions(self):
146 self._get('/api/pool')
147 self.assertStatus(403)
148 self._get('/api/pool/bla')
149 self.assertStatus(403)
150
151 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}])
152 def test_create_access_permissions(self):
153 self._task_post('/api/pool/', {})
154 self.assertStatus(403)
155
156 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}])
157 def test_delete_access_permissions(self):
158 self._delete('/api/pool/ddd')
159 self.assertStatus(403)
160
161 def test_pool_list(self):
162 data = self._get("/api/pool")
163 self.assertStatus(200)
164
165 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
166 self.assertEqual(len(cluster_pools), len(data))
167 self.assertSchemaBody(JList(self.pool_schema))
168 for pool in data:
169 self.assertNotIn('pg_status', pool)
170 self.assertNotIn('stats', pool)
171 self.assertIn(pool['pool_name'], cluster_pools)
172
173 def test_pool_list_attrs(self):
174 data = self._get("/api/pool?attrs=type,flags")
175 self.assertStatus(200)
176
177 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
178 self.assertEqual(len(cluster_pools), len(data))
179 for pool in data:
180 self.assertIn('pool_name', pool)
181 self.assertIn('type', pool)
182 self.assertIn('flags', pool)
183 self.assertNotIn('flags_names', pool)
184 self.assertNotIn('pg_status', pool)
185 self.assertNotIn('stats', pool)
186 self.assertIn(pool['pool_name'], cluster_pools)
187
188 def test_pool_list_stats(self):
189 data = self._get("/api/pool?stats=true")
190 self.assertStatus(200)
191
192 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
193 self.assertEqual(len(cluster_pools), len(data))
194 self.assertSchemaBody(JList(self.pool_schema))
195 for pool in data:
196 self.assertIn('pool_name', pool)
197 self.assertIn('type', pool)
198 self.assertIn('application_metadata', pool)
199 self.assertIn('flags', pool)
200 self.assertIn('pg_status', pool)
201 self.assertSchema(pool['stats'], self.pool_list_stats_schema)
202 self.assertIn('flags_names', pool)
203 self.assertIn(pool['pool_name'], cluster_pools)
204
205 def test_pool_get(self):
206 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
207 pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats"
208 .format(cluster_pools[0]))
209 self.assertEqual(pool['pool_name'], cluster_pools[0])
210 self.assertIn('type', pool)
211 self.assertIn('flags', pool)
212 self.assertNotIn('pg_status', pool)
213 self.assertSchema(pool['stats'], self.pool_list_stats_schema)
214 self.assertNotIn('flags_names', pool)
215 self.assertSchema(pool['configuration'], self.pool_rbd_conf_schema)
216
217 def test_pool_create_with_two_applications(self):
218 self.__yield_pool(None, {
219 'pool': 'dashboard_pool1',
220 'pg_num': '32',
221 'pool_type': 'replicated',
222 'application_metadata': ['rbd', 'sth'],
223 })
224
225 def test_pool_create_with_ecp_and_rule(self):
226 self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
227 self._ceph_cmd(
228 ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd'])
229 self.__yield_pool(None, {
230 'pool': 'dashboard_pool2',
231 'pg_num': '32',
232 'pool_type': 'erasure',
233 'application_metadata': ['rbd'],
234 'erasure_code_profile': 'ecprofile',
235 'crush_rule': 'ecrule',
236 })
237 self._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])
238
239 def test_pool_create_with_compression(self):
240 pool = {
241 'pool': 'dashboard_pool3',
242 'pg_num': '32',
243 'pool_type': 'replicated',
244 'compression_algorithm': 'zstd',
245 'compression_mode': 'aggressive',
246 'compression_max_blob_size': '10000000',
247 'compression_required_ratio': '0.8',
248 'configuration': {
249 'rbd_qos_bps_limit': 2048,
250 'rbd_qos_iops_limit': None,
251 },
252 }
253 with self.__yield_pool(None, pool):
254 expected_configuration = [{
255 'name': 'rbd_qos_bps_limit',
256 'source': 1,
257 'value': '2048',
258 }, {
259 'name': 'rbd_qos_iops_limit',
260 'source': 0,
261 'value': '0',
262 }]
263 new_pool = self._get_pool(pool['pool'])
264 for conf in expected_configuration:
265 self.assertIn(conf, new_pool['configuration'])
266
267 def test_pool_create_with_quotas(self):
268 pools = [
269 {
270 'pool_data': {
271 'pool': 'dashboard_pool_quota1',
272 'pg_num': '32',
273 'pool_type': 'replicated',
274 },
275 'pool_quotas_to_check': {
276 'quota_max_objects': 0,
277 'quota_max_bytes': 0,
278 }
279 },
280 {
281 'pool_data': {
282 'pool': 'dashboard_pool_quota2',
283 'pg_num': '32',
284 'pool_type': 'replicated',
285 'quota_max_objects': 1024,
286 'quota_max_bytes': 1000,
287 },
288 'pool_quotas_to_check': {
289 'quota_max_objects': 1024,
290 'quota_max_bytes': 1000,
291 }
292 }
293 ]
294
295 for pool in pools:
296 pool_name = pool['pool_data']['pool']
297 with self.__yield_pool(pool_name, pool['pool_data']):
298 self._validate_pool_properties(pool['pool_quotas_to_check'],
299 self._get_pool(pool_name))
300
301 def test_pool_update_name(self):
302 name = 'pool_update'
303 updated_name = 'pool_updated_name'
304 with self.__yield_pool(name, None, updated_name):
305 props = {'pool': updated_name}
306 self._task_put('/api/pool/{}'.format(name), props)
307 time.sleep(5)
308 self.assertStatus(200)
309 self._validate_pool_properties(props, self._get_pool(updated_name))
310
311 def test_pool_update_metadata(self):
312 pool_name = 'pool_update_metadata'
313 with self.__yield_pool(pool_name):
314 props = {'application_metadata': ['rbd', 'sth']}
315 self._task_put('/api/pool/{}'.format(pool_name), props)
316 time.sleep(5)
317 self._validate_pool_properties(props, self._get_pool(pool_name))
318
319 properties = {'application_metadata': ['rgw']}
320 self._task_put('/api/pool/' + pool_name, properties)
321 time.sleep(5)
322 self._validate_pool_properties(properties, self._get_pool(pool_name))
323
324 properties = {'application_metadata': ['rbd', 'sth']}
325 self._task_put('/api/pool/' + pool_name, properties)
326 time.sleep(5)
327 self._validate_pool_properties(properties, self._get_pool(pool_name))
328
329 properties = {'application_metadata': ['rgw']}
330 self._task_put('/api/pool/' + pool_name, properties)
331 time.sleep(5)
332 self._validate_pool_properties(properties, self._get_pool(pool_name))
333
334 def test_pool_update_configuration(self):
335 pool_name = 'pool_update_configuration'
336 with self.__yield_pool(pool_name):
337 configuration = {
338 'rbd_qos_bps_limit': 1024,
339 'rbd_qos_iops_limit': None,
340 }
341 expected_configuration = [{
342 'name': 'rbd_qos_bps_limit',
343 'source': 1,
344 'value': '1024',
345 }, {
346 'name': 'rbd_qos_iops_limit',
347 'source': 0,
348 'value': '0',
349 }]
350 self._task_put('/api/pool/' + pool_name, {'configuration': configuration})
351 time.sleep(5)
352 pool_config = self._get_pool(pool_name)['configuration']
353 for conf in expected_configuration:
354 self.assertIn(conf, pool_config)
355
356 def test_pool_update_compression(self):
357 pool_name = 'pool_update_compression'
358 with self.__yield_pool(pool_name):
359 properties = {
360 'compression_algorithm': 'zstd',
361 'compression_mode': 'aggressive',
362 'compression_max_blob_size': '10000000',
363 'compression_required_ratio': '0.8',
364 }
365 self._task_put('/api/pool/' + pool_name, properties)
366 time.sleep(5)
367 self._validate_pool_properties(properties, self._get_pool(pool_name))
368
369 def test_pool_update_unset_compression(self):
370 pool_name = 'pool_update_unset_compression'
371 with self.__yield_pool(pool_name):
372 self._task_put('/api/pool/' + pool_name, {'compression_mode': 'unset'})
373 time.sleep(5)
374 self._validate_pool_properties({
375 'compression_algorithm': None,
376 'compression_mode': None,
377 'compression_max_blob_size': None,
378 'compression_required_ratio': None,
379 }, self._get_pool(pool_name))
380
381 def test_pool_update_quotas(self):
382 pool_name = 'pool_update_quotas'
383 with self.__yield_pool(pool_name):
384 properties = {
385 'quota_max_objects': 1024,
386 'quota_max_bytes': 1000,
387 }
388 self._task_put('/api/pool/' + pool_name, properties)
389 time.sleep(5)
390 self._validate_pool_properties(properties, self._get_pool(pool_name))
391
392 def test_pool_create_fail(self):
393 data = {'pool_type': u'replicated', 'rule_name': u'dnf', 'pg_num': u'8', 'pool': u'sadfs'}
394 self._task_post('/api/pool/', data)
395 self.assertStatus(400)
396 self.assertJsonBody({
397 'component': 'pool',
398 'code': "2",
399 'detail': "[errno -2] specified rule dnf doesn't exist"
400 })
401
402 def test_pool_info(self):
403 self._get("/ui-api/pool/info")
404 self.assertSchemaBody(JObj({
405 'pool_names': JList(six.string_types),
406 'compression_algorithms': JList(six.string_types),
407 'compression_modes': JList(six.string_types),
408 'is_all_bluestore': bool,
409 'bluestore_compression_algorithm': six.string_types,
410 'osd_count': int,
411 'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
412 'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
413 'pg_autoscale_default_mode': six.string_types,
414 'pg_autoscale_modes': JList(six.string_types),
415 'erasure_code_profiles': JList(JObj({}, allow_unknown=True)),
416 'used_rules': JObj({}, allow_unknown=True),
417 }))