# Extraction artifact removed. Provenance: ceph/qa/tasks/mgr/dashboard/test_pool.py
# (git.proxmox.com mirror, blob 969318d2a94b3b79496cf1396cf7e992ecd05093).
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import logging
import time
from contextlib import contextmanager

import six

from .helper import DashboardTestCase, JAny, JList, JObj
# Module-level logger for this test module.
log = logging.getLogger(__name__)
14 class PoolTest(DashboardTestCase
):
15 AUTH_ROLES
= ['pool-manager']
17 pool_schema
= JObj(sub_elems
={
20 'application_metadata': JList(str),
23 }, allow_unknown
=True)
25 pool_list_stat_schema
= JObj(sub_elems
={
28 'rates': JList(JAny(none
=False)),
31 pool_list_stats_schema
= JObj(sub_elems
={
32 'bytes_used': pool_list_stat_schema
,
33 'max_avail': pool_list_stat_schema
,
34 'rd_bytes': pool_list_stat_schema
,
35 'wr_bytes': pool_list_stat_schema
,
36 'rd': pool_list_stat_schema
,
37 'wr': pool_list_stat_schema
,
38 }, allow_unknown
=True)
40 pool_rbd_conf_schema
= JList(JObj(sub_elems
={
47 def __yield_pool(self
, name
=None, data
=None, deletion_name
=None):
49 Use either just a name or whole description of a pool to create one.
50 This also validates the correct creation and deletion after the pool was used.
52 :param name: Name of the pool
53 :param data: Describes the pool in full length
54 :param deletion_name: Only needed if the pool was renamed
57 data
= self
._create
_pool
(name
, data
)
59 self
._delete
_pool
(deletion_name
or data
['pool'])
61 def _create_pool(self
, name
, data
):
65 'pool_type': 'replicated',
66 'compression_algorithm': 'snappy',
67 'compression_mode': 'passive',
68 'compression_max_blob_size': '131072',
69 'compression_required_ratio': '0.875',
70 'application_metadata': ['rbd'],
72 'rbd_qos_bps_limit': 1024000,
73 'rbd_qos_iops_limit': 5000,
76 self
._task
_post
('/api/pool/', data
)
77 self
.assertStatus(201)
78 self
._validate
_pool
_properties
(data
, self
._get
_pool
(data
['pool']))
81 def _delete_pool(self
, name
):
82 self
._task
_delete
('/api/pool/' + name
)
83 self
.assertStatus(204)
85 def _validate_pool_properties(self
, data
, pool
):
86 for prop
, value
in data
.items():
87 if prop
== 'pool_type':
88 self
.assertEqual(pool
['type'], value
)
90 self
.assertEqual(pool
[prop
], int(value
),
91 '{}: {} != {}'.format(prop
, pool
[prop
], value
))
92 elif prop
== 'pg_num':
93 self
._check
_pg
_num
(value
, pool
)
94 elif prop
== 'application_metadata':
95 self
.assertIsInstance(pool
[prop
], list)
96 self
.assertEqual(value
, pool
[prop
])
98 self
.assertEqual(pool
['pool_name'], value
)
99 elif prop
.startswith('compression'):
100 if value
is not None:
101 if prop
.endswith('size'):
103 elif prop
.endswith('ratio'):
105 self
.assertEqual(pool
['options'][prop
], value
)
107 self
.assertEqual(pool
['options'], {})
108 elif prop
== 'configuration':
109 # configuration cannot really be checked here for two reasons:
110 # 1. The default value cannot be given to this method, which becomes relevant
111 # when resetting a value, because it's not always zero.
112 # 2. The expected `source` cannot be given to this method, and it cannot
113 # relibably be determined (see 1)
116 self
.assertEqual(pool
[prop
], value
, '{}: {} != {}'.format(prop
, pool
[prop
], value
))
118 health
= self
._get
('/api/health/minimal')['health']
119 self
.assertEqual(health
['status'], 'HEALTH_OK', msg
='health={}'.format(health
))
121 def _get_pool(self
, pool_name
):
122 pool
= self
._get
("/api/pool/" + pool_name
)
123 self
.assertStatus(200)
124 self
.assertSchemaBody(self
.pool_schema
)
127 def _check_pg_num(self
, value
, pool
):
129 If both properties have not the same value, the cluster goes into a warning state, which
130 will only happen during a pg update on an existing pool. The test that does that is
131 currently commented out because our QA systems can't deal with the change. Feel free to test
134 pgp_prop
= 'pg_placement_num'
136 while (int(value
) != pool
[pgp_prop
] or self
._get
('/api/health/minimal')['health']['status']
137 != 'HEALTH_OK') and t
< 180:
140 pool
= self
._get
_pool
(pool
['pool_name'])
141 for p
in ['pg_num', pgp_prop
]: # Should have the same values
142 self
.assertEqual(pool
[p
], int(value
), '{}: {} != {}'.format(p
, pool
[p
], value
))
144 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
145 def test_read_access_permissions(self
):
146 self
._get
('/api/pool')
147 self
.assertStatus(403)
148 self
._get
('/api/pool/bla')
149 self
.assertStatus(403)
151 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}])
152 def test_create_access_permissions(self
):
153 self
._task
_post
('/api/pool/', {})
154 self
.assertStatus(403)
156 @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}])
157 def test_delete_access_permissions(self
):
158 self
._delete
('/api/pool/ddd')
159 self
.assertStatus(403)
161 def test_pool_list(self
):
162 data
= self
._get
("/api/pool")
163 self
.assertStatus(200)
165 cluster_pools
= self
.ceph_cluster
.mon_manager
.list_pools()
166 self
.assertEqual(len(cluster_pools
), len(data
))
167 self
.assertSchemaBody(JList(self
.pool_schema
))
169 self
.assertNotIn('pg_status', pool
)
170 self
.assertNotIn('stats', pool
)
171 self
.assertIn(pool
['pool_name'], cluster_pools
)
173 def test_pool_list_attrs(self
):
174 data
= self
._get
("/api/pool?attrs=type,flags")
175 self
.assertStatus(200)
177 cluster_pools
= self
.ceph_cluster
.mon_manager
.list_pools()
178 self
.assertEqual(len(cluster_pools
), len(data
))
180 self
.assertIn('pool_name', pool
)
181 self
.assertIn('type', pool
)
182 self
.assertIn('flags', pool
)
183 self
.assertNotIn('flags_names', pool
)
184 self
.assertNotIn('pg_status', pool
)
185 self
.assertNotIn('stats', pool
)
186 self
.assertIn(pool
['pool_name'], cluster_pools
)
188 def test_pool_list_stats(self
):
189 data
= self
._get
("/api/pool?stats=true")
190 self
.assertStatus(200)
192 cluster_pools
= self
.ceph_cluster
.mon_manager
.list_pools()
193 self
.assertEqual(len(cluster_pools
), len(data
))
194 self
.assertSchemaBody(JList(self
.pool_schema
))
196 self
.assertIn('pool_name', pool
)
197 self
.assertIn('type', pool
)
198 self
.assertIn('application_metadata', pool
)
199 self
.assertIn('flags', pool
)
200 self
.assertIn('pg_status', pool
)
201 self
.assertSchema(pool
['stats'], self
.pool_list_stats_schema
)
202 self
.assertIn('flags_names', pool
)
203 self
.assertIn(pool
['pool_name'], cluster_pools
)
205 def test_pool_get(self
):
206 cluster_pools
= self
.ceph_cluster
.mon_manager
.list_pools()
207 pool
= self
._get
("/api/pool/{}?stats=true&attrs=type,flags,stats"
208 .format(cluster_pools
[0]))
209 self
.assertEqual(pool
['pool_name'], cluster_pools
[0])
210 self
.assertIn('type', pool
)
211 self
.assertIn('flags', pool
)
212 self
.assertNotIn('pg_status', pool
)
213 self
.assertSchema(pool
['stats'], self
.pool_list_stats_schema
)
214 self
.assertNotIn('flags_names', pool
)
215 self
.assertSchema(pool
['configuration'], self
.pool_rbd_conf_schema
)
217 def test_pool_create_with_two_applications(self
):
218 self
.__yield
_pool
(None, {
219 'pool': 'dashboard_pool1',
221 'pool_type': 'replicated',
222 'application_metadata': ['rbd', 'sth'],
225 def test_pool_create_with_ecp_and_rule(self
):
226 self
._ceph
_cmd
(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
228 ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd'])
229 self
.__yield
_pool
(None, {
230 'pool': 'dashboard_pool2',
232 'pool_type': 'erasure',
233 'application_metadata': ['rbd'],
234 'erasure_code_profile': 'ecprofile',
235 'crush_rule': 'ecrule',
237 self
._ceph
_cmd
(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])
239 def test_pool_create_with_compression(self
):
241 'pool': 'dashboard_pool3',
243 'pool_type': 'replicated',
244 'compression_algorithm': 'zstd',
245 'compression_mode': 'aggressive',
246 'compression_max_blob_size': '10000000',
247 'compression_required_ratio': '0.8',
249 'rbd_qos_bps_limit': 2048,
250 'rbd_qos_iops_limit': None,
253 with self
.__yield
_pool
(None, pool
):
254 expected_configuration
= [{
255 'name': 'rbd_qos_bps_limit',
259 'name': 'rbd_qos_iops_limit',
263 new_pool
= self
._get
_pool
(pool
['pool'])
264 for conf
in expected_configuration
:
265 self
.assertIn(conf
, new_pool
['configuration'])
267 def test_pool_create_with_quotas(self
):
271 'pool': 'dashboard_pool_quota1',
273 'pool_type': 'replicated',
275 'pool_quotas_to_check': {
276 'quota_max_objects': 0,
277 'quota_max_bytes': 0,
282 'pool': 'dashboard_pool_quota2',
284 'pool_type': 'replicated',
285 'quota_max_objects': 1024,
286 'quota_max_bytes': 1000,
288 'pool_quotas_to_check': {
289 'quota_max_objects': 1024,
290 'quota_max_bytes': 1000,
296 pool_name
= pool
['pool_data']['pool']
297 with self
.__yield
_pool
(pool_name
, pool
['pool_data']):
298 self
._validate
_pool
_properties
(pool
['pool_quotas_to_check'],
299 self
._get
_pool
(pool_name
))
301 def test_pool_update_name(self
):
303 updated_name
= 'pool_updated_name'
304 with self
.__yield
_pool
(name
, None, updated_name
):
305 props
= {'pool': updated_name
}
306 self
._task
_put
('/api/pool/{}'.format(name
), props
)
308 self
.assertStatus(200)
309 self
._validate
_pool
_properties
(props
, self
._get
_pool
(updated_name
))
311 def test_pool_update_metadata(self
):
312 pool_name
= 'pool_update_metadata'
313 with self
.__yield
_pool
(pool_name
):
314 props
= {'application_metadata': ['rbd', 'sth']}
315 self
._task
_put
('/api/pool/{}'.format(pool_name
), props
)
317 self
._validate
_pool
_properties
(props
, self
._get
_pool
(pool_name
))
319 properties
= {'application_metadata': ['rgw']}
320 self
._task
_put
('/api/pool/' + pool_name
, properties
)
322 self
._validate
_pool
_properties
(properties
, self
._get
_pool
(pool_name
))
324 properties
= {'application_metadata': ['rbd', 'sth']}
325 self
._task
_put
('/api/pool/' + pool_name
, properties
)
327 self
._validate
_pool
_properties
(properties
, self
._get
_pool
(pool_name
))
329 properties
= {'application_metadata': ['rgw']}
330 self
._task
_put
('/api/pool/' + pool_name
, properties
)
332 self
._validate
_pool
_properties
(properties
, self
._get
_pool
(pool_name
))
334 def test_pool_update_configuration(self
):
335 pool_name
= 'pool_update_configuration'
336 with self
.__yield
_pool
(pool_name
):
338 'rbd_qos_bps_limit': 1024,
339 'rbd_qos_iops_limit': None,
341 expected_configuration
= [{
342 'name': 'rbd_qos_bps_limit',
346 'name': 'rbd_qos_iops_limit',
350 self
._task
_put
('/api/pool/' + pool_name
, {'configuration': configuration
})
352 pool_config
= self
._get
_pool
(pool_name
)['configuration']
353 for conf
in expected_configuration
:
354 self
.assertIn(conf
, pool_config
)
356 def test_pool_update_compression(self
):
357 pool_name
= 'pool_update_compression'
358 with self
.__yield
_pool
(pool_name
):
360 'compression_algorithm': 'zstd',
361 'compression_mode': 'aggressive',
362 'compression_max_blob_size': '10000000',
363 'compression_required_ratio': '0.8',
365 self
._task
_put
('/api/pool/' + pool_name
, properties
)
367 self
._validate
_pool
_properties
(properties
, self
._get
_pool
(pool_name
))
369 def test_pool_update_unset_compression(self
):
370 pool_name
= 'pool_update_unset_compression'
371 with self
.__yield
_pool
(pool_name
):
372 self
._task
_put
('/api/pool/' + pool_name
, {'compression_mode': 'unset'})
374 self
._validate
_pool
_properties
({
375 'compression_algorithm': None,
376 'compression_mode': None,
377 'compression_max_blob_size': None,
378 'compression_required_ratio': None,
379 }, self
._get
_pool
(pool_name
))
381 def test_pool_update_quotas(self
):
382 pool_name
= 'pool_update_quotas'
383 with self
.__yield
_pool
(pool_name
):
385 'quota_max_objects': 1024,
386 'quota_max_bytes': 1000,
388 self
._task
_put
('/api/pool/' + pool_name
, properties
)
390 self
._validate
_pool
_properties
(properties
, self
._get
_pool
(pool_name
))
392 def test_pool_create_fail(self
):
393 data
= {'pool_type': u
'replicated', 'rule_name': u
'dnf', 'pg_num': u
'8', 'pool': u
'sadfs'}
394 self
._task
_post
('/api/pool/', data
)
395 self
.assertStatus(400)
396 self
.assertJsonBody({
399 'detail': "[errno -2] specified rule dnf doesn't exist"
402 def test_pool_info(self
):
403 self
._get
("/ui-api/pool/info")
404 self
.assertSchemaBody(JObj({
405 'pool_names': JList(six
.string_types
),
406 'compression_algorithms': JList(six
.string_types
),
407 'compression_modes': JList(six
.string_types
),
408 'is_all_bluestore': bool,
409 'bluestore_compression_algorithm': six
.string_types
,
411 'crush_rules_replicated': JList(JObj({}, allow_unknown
=True)),
412 'crush_rules_erasure': JList(JObj({}, allow_unknown
=True)),
413 'pg_autoscale_default_mode': six
.string_types
,
414 'pg_autoscale_modes': JList(six
.string_types
),
415 'erasure_code_profiles': JList(JObj({}, allow_unknown
=True)),
416 'used_rules': JObj({}, allow_unknown
=True),