# -*- coding: utf-8 -*-
from __future__ import absolute_import

import logging
import time

import six

from .helper import DashboardTestCase, JAny, JList, JObj

log = logging.getLogger(__name__)


class PoolTest(DashboardTestCase):
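    """Tests for the dashboard's pool management REST endpoints (/api/pool)."""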
    AUTH_ROLES = ['pool-manager']

    pool_schema = JObj(sub_elems={
        'pool_name': str,
        'type': str,
        'application_metadata': JList(str),
        'flags': int,
        'flags_names': str,
    }, allow_unknown=True)

    pool_list_stat_schema = JObj(sub_elems={
        'latest': int,
        'rate': float,
        'series': JList(JAny(none=False)),
    })

    pool_list_stats_schema = JObj(sub_elems={
        'bytes_used': pool_list_stat_schema,
        'max_avail': pool_list_stat_schema,
        'rd_bytes': pool_list_stat_schema,
        'wr_bytes': pool_list_stat_schema,
        'rd': pool_list_stat_schema,
        'wr': pool_list_stat_schema,
    }, allow_unknown=True)

    def _pool_create(self, data):
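        """Create a pool via the REST API, verify its properties, then delete it again."""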
        try:
            self._task_post('/api/pool/', data)
            self.assertStatus(201)

            self._check_pool_properties(data)

            self._task_delete("/api/pool/" + data['pool'])
            self.assertStatus(204)
        except Exception:
            log.exception("test_pool_create: data=%s", data)
            raise

    def _check_pool_properties(self, data, pool_name=None):
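        """Fetch the pool and assert that every key/value pair in ``data`` is
        reflected by it, and that the cluster reports HEALTH_OK afterwards.
        """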
        if not pool_name:
            pool_name = data['pool']
        pool = self._get_pool(pool_name)
        try:
            for k, v in data.items():
                self._check_pool_property(k, v, pool)
        except Exception:
            log.exception("test_pool_create: pool=%s", pool)
            raise

        health = self._get('/api/health/minimal')['health']
        self.assertEqual(health['status'], 'HEALTH_OK', msg='health={}'.format(health))

    def _get_pool(self, pool_name):
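        """GET a single pool and validate the response body against ``pool_schema``."""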
        pool = self._get("/api/pool/" + pool_name)
        self.assertStatus(200)
        self.assertSchemaBody(self.pool_schema)
        return pool

    def _check_pool_property(self, prop, value, pool):
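        """Assert a single pool property, translating the request field names and
        value types (e.g. ``pool`` vs. ``pool_name``) to what the GET response uses.
        """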
        if prop == 'pool_type':
            self.assertEqual(pool['type'], value)
        elif prop == 'size':
            self.assertEqual(pool[prop], int(value),
                             '{}: {} != {}'.format(prop, pool[prop], value))
        elif prop == 'pg_num':
            self._check_pg_num(value, pool)
        elif prop == 'application_metadata':
            self.assertIsInstance(pool[prop], list)
            self.assertEqual(pool[prop], value)
        elif prop == 'pool':
            self.assertEqual(pool['pool_name'], value)
        elif prop.startswith('compression'):
            if value is not None:
                if prop.endswith('size'):
                    value = int(value)
                elif prop.endswith('ratio'):
                    value = float(value)
            self.assertEqual(pool['options'].get(prop), value)
        else:
            self.assertEqual(pool[prop], value,
                             '{}: {} != {}'.format(prop, pool[prop], value))

    def _check_pg_num(self, value, pool):
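        """Wait (up to ~3 minutes) until ``pg_placement_num`` has caught up with the
        requested ``pg_num`` and the cluster is healthy again, then assert that both
        properties equal the requested value.
        """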
        # If the two properties do not have the same value, the cluster goes into a
        # warning state, which only happens during a pg update on an existing pool.
        # The test that triggers such an update is currently commented out because
        # our QA systems can't deal with the change.
        # Feel free to test it locally.
        prop = 'pg_num'
        pgp_prop = 'pg_placement_num'

        def health():
            return self._get('/api/health/minimal')['health']['status'] == 'HEALTH_OK'

        t = 0
        while (int(value) != pool[pgp_prop] or not health()) and t < 180:
            time.sleep(2)
            t += 2
            pool = self._get_pool(pool['pool_name'])
        for p in [prop, pgp_prop]:  # Should have the same values
            self.assertEqual(pool[p], int(value), '{}: {} != {}'.format(p, pool[p], value))

    @classmethod
    def tearDownClass(cls):
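        """Clean up the pools and the erasure-code profile the tests may have left behind."""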
        super(PoolTest, cls).tearDownClass()
        for name in ['dashboard_pool1', 'dashboard_pool2', 'dashboard_pool3',
                     'dashboard_pool_update1']:
            cls._ceph_cmd(['osd', 'pool', 'delete', name, name, '--yes-i-really-really-mean-it'])
        cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile'])

    @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}])
    def test_read_access_permissions(self):
        self._get('/api/pool')
        self.assertStatus(403)
        self._get('/api/pool/bla')
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}])
    def test_create_access_permissions(self):
        self._task_post('/api/pool/', {})
        self.assertStatus(403)

    @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}])
    def test_delete_access_permissions(self):
        self._delete('/api/pool/ddd')
        self.assertStatus(403)

    def test_pool_list(self):
138 data = self._get("/api/pool")
139 self.assertStatus(200)
140
141 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
142 self.assertEqual(len(cluster_pools), len(data))
143 self.assertSchemaBody(JList(self.pool_schema))
144 for pool in data:
145 self.assertNotIn('pg_status', pool)
146 self.assertNotIn('stats', pool)
147 self.assertIn(pool['pool_name'], cluster_pools)
148
149 def test_pool_list_attrs(self):
150 data = self._get("/api/pool?attrs=type,flags")
151 self.assertStatus(200)
152
153 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
154 self.assertEqual(len(cluster_pools), len(data))
155 for pool in data:
156 self.assertIn('pool_name', pool)
157 self.assertIn('type', pool)
158 self.assertIn('flags', pool)
159 self.assertNotIn('flags_names', pool)
160 self.assertNotIn('pg_status', pool)
161 self.assertNotIn('stats', pool)
162 self.assertIn(pool['pool_name'], cluster_pools)
163
164 def test_pool_list_stats(self):
165 data = self._get("/api/pool?stats=true")
166 self.assertStatus(200)
167
168 cluster_pools = self.ceph_cluster.mon_manager.list_pools()
169 self.assertEqual(len(cluster_pools), len(data))
170 self.assertSchemaBody(JList(self.pool_schema))
171 for pool in data:
172 self.assertIn('pool_name', pool)
173 self.assertIn('type', pool)
174 self.assertIn('application_metadata', pool)
175 self.assertIn('flags', pool)
176 self.assertIn('pg_status', pool)
177 self.assertSchema(pool['stats'], self.pool_list_stats_schema)
178 self.assertIn('flags_names', pool)
179 self.assertIn(pool['pool_name'], cluster_pools)
180
181 def test_pool_get(self):
        cluster_pools = self.ceph_cluster.mon_manager.list_pools()
        pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats"
                         .format(cluster_pools[0]))
        self.assertEqual(pool['pool_name'], cluster_pools[0])
        self.assertIn('type', pool)
        self.assertIn('flags', pool)
        self.assertNotIn('pg_status', pool)
        self.assertSchema(pool['stats'], self.pool_list_stats_schema)
        self.assertNotIn('flags_names', pool)

    def test_pool_create(self):
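        """Create replicated, erasure-coded and compression-enabled pools and verify
        each of them via ``_pool_create``.
        """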
        self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule'])
        self._ceph_cmd(
            ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd'])
        pools = [{
            'pool': 'dashboard_pool1',
            'pg_num': '10',
            'pool_type': 'replicated',
            'application_metadata': ['rbd', 'sth'],
        }, {
            'pool': 'dashboard_pool2',
            'pg_num': '10',
            'pool_type': 'erasure',
            'erasure_code_profile': 'ecprofile',
            'crush_rule': 'ecrule',
        }, {
            'pool': 'dashboard_pool3',
            'pg_num': '10',
            'pool_type': 'replicated',
            'compression_algorithm': 'zstd',
            'compression_mode': 'aggressive',
            'compression_max_blob_size': '10000000',
            'compression_required_ratio': '0.8',
        }]
        for data in pools:
            self._pool_create(data)

    def test_update(self):
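        """Create a pool and apply a series of updates (application metadata and
        compression settings), verifying the pool's properties after each one.
        """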
        pool = {
            'pool': 'dashboard_pool_update1',
            'pg_num': '4',
            'pool_type': 'replicated',
            'compression_mode': 'passive',
            'compression_algorithm': 'snappy',
            'compression_max_blob_size': '131072',
            'compression_required_ratio': '0.875',
        }
        updates = [
            {
                'application_metadata': ['rbd', 'sth'],
            },
            # The following test case is currently commented out because
            # our QA systems can't deal with the change and will fail because
            # they can't recover from the resulting warning state.
            # Feel free to test it locally.
            # {
            #     'pg_num': '8',
            # },
            {
                'application_metadata': ['rgw'],
            },
            {
                'compression_algorithm': 'zstd',
                'compression_mode': 'aggressive',
                'compression_max_blob_size': '10000000',
                'compression_required_ratio': '0.8',
            },
            {
                'compression_mode': 'unset'
            },
        ]
        self._task_post('/api/pool/', pool)
        self.assertStatus(201)
        self._check_pool_properties(pool)

        for update in updates:
            self._task_put('/api/pool/' + pool['pool'], update)
            if update.get('compression_mode') == 'unset':
                update = {
                    'compression_mode': None,
                    'compression_algorithm': None,
                    'compression_max_blob_size': None,
                    'compression_required_ratio': None,
                }
            self._check_pool_properties(update, pool_name=pool['pool'])
        self._task_delete("/api/pool/" + pool['pool'])
        self.assertStatus(204)

    def test_pool_create_fail(self):
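        """Creating a pool with a non-existent CRUSH rule must fail with a 400 error."""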
        data = {'pool_type': u'replicated', 'rule_name': u'dnf', 'pg_num': u'8', 'pool': u'sadfs'}
        self._task_post('/api/pool/', data)
        self.assertStatus(400)
        self.assertJsonBody({
            'component': 'pool',
            'code': "2",
            'detail': "[errno -2] specified rule dnf doesn't exist"
        })

    def test_pool_info(self):
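        """Validate the schema of the response of GET /api/pool/_info."""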
282 self._get("/api/pool/_info")
283 self.assertSchemaBody(JObj({
284 'pool_names': JList(six.string_types),
285 'compression_algorithms': JList(six.string_types),
286 'compression_modes': JList(six.string_types),
287 'is_all_bluestore': bool,
288 "bluestore_compression_algorithm": six.string_types,
289 'osd_count': int,
290 'crush_rules_replicated': JList(JObj({}, allow_unknown=True)),
291 'crush_rules_erasure': JList(JObj({}, allow_unknown=True)),
292 }))