]> git.proxmox.com Git - ceph.git/blame - ceph/src/pybind/mgr/dashboard/tests/test_osd.py
import ceph quincy 17.2.4
[ceph.git] / ceph / src / pybind / mgr / dashboard / tests / test_osd.py
CommitLineData
11fdf7f2 1# -*- coding: utf-8 -*-
11fdf7f2
TL
2import uuid
3from contextlib import contextmanager
f67539c2
TL
4from typing import Any, Dict, List, Optional
5from unittest import mock
11fdf7f2 6
f67539c2 7from ceph.deployment.drive_group import DeviceSelection, DriveGroupSpec # type: ignore
2a845540 8from ceph.deployment.service_spec import PlacementSpec
11fdf7f2 9
f67539c2 10from .. import mgr
2a845540
TL
11from ..controllers.osd import Osd, OsdUi
12from ..services.osd import OsdDeploymentOptions
a4b75251 13from ..tests import ControllerTestCase
9f95a23c 14from ..tools import NotificationQueue, TaskManager
f67539c2 15from .helper import update_dict # pylint: disable=import-error
11fdf7f2
TL
16
17
class OsdHelper(object):
    """Fixture builders for the data the OSD controller consumes.

    Generates the mgr-side structures (osd stats, osd map, CRUSH tree,
    perf counters) and mocks the orchestrator device inventory.
    """

    # OSD ids used whenever a generator is called without an explicit list.
    DEFAULT_OSD_IDS = [0, 1, 2]

    @staticmethod
    def _gen_osdmap_tree_node(node_id: int, node_type: str, children: Optional[List[int]] = None,
                              update_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Return a single CRUSH tree node of the given ``node_type``.

        Bucket nodes ('root' and 'host') must be given their ``children``
        ids. ``update_data``, when provided, is merged over the generated
        node via ``update_dict``.
        """
        assert node_type in ['root', 'host', 'osd']
        if node_type in ['root', 'host']:
            assert children is not None

        node_types = {
            'root': {
                'id': node_id,
                'name': 'default',
                'type': 'root',
                'type_id': 10,
                'children': children,
            },
            'host': {
                'id': node_id,
                'name': 'ceph-1',
                'type': 'host',
                'type_id': 1,
                'pool_weights': {},
                'children': children,
            },
            'osd': {
                'id': node_id,
                'device_class': 'hdd',
                'type': 'osd',
                'type_id': 0,
                'crush_weight': 0.009796142578125,
                'depth': 2,
                'pool_weights': {},
                'exists': 1,
                'status': 'up',
                'reweight': 1.0,
                'primary_affinity': 1.0,
                'name': 'osd.{}'.format(node_id),
            }
        }
        node = node_types[node_type]

        return update_dict(node, update_data) if update_data else node

    @staticmethod
    def _gen_osd_stats(osd_id: int, update_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Return an ``osd_stats`` entry for ``osd_id``.

        ``update_data``, when provided, is merged over the generated stats.
        """
        stats = {
            'osd': osd_id,
            'up_from': 11,
            'seq': 47244640581,
            'num_pgs': 50,
            'kb': 10551288,
            'kb_used': 1119736,
            'kb_used_data': 5504,
            'kb_used_omap': 0,
            'kb_used_meta': 1048576,
            'kb_avail': 9431552,
            'statfs': {
                'total': 10804518912,
                'available': 9657909248,
                'internally_reserved': 1073741824,
                'allocated': 5636096,
                'data_stored': 102508,
                'data_compressed': 0,
                'data_compressed_allocated': 0,
                'data_compressed_original': 0,
                'omap_allocated': 0,
                'internal_metadata': 1073741824
            },
            'hb_peers': [0, 1],
            'snap_trim_queue_len': 0,
            'num_snap_trimming': 0,
            'op_queue_age_hist': {
                'histogram': [],
                'upper_bound': 1
            },
            'perf_stat': {
                'commit_latency_ms': 0.0,
                'apply_latency_ms': 0.0,
                'commit_latency_ns': 0,
                'apply_latency_ns': 0
            },
            'alerts': [],
        }
        return stats if not update_data else update_dict(stats, update_data)

    @staticmethod
    def _gen_osd_map_osd(osd_id: int) -> Dict[str, Any]:
        """Return one osdmap entry (addresses, state, uuid) for ``osd_id``."""
        return {
            'osd': osd_id,
            'up': 1,
            'in': 1,
            'weight': 1.0,
            'primary_affinity': 1.0,
            'last_clean_begin': 0,
            'last_clean_end': 0,
            'up_from': 5,
            'up_thru': 21,
            'down_at': 0,
            'lost_at': 0,
            'public_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6802'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6803'
                }]
            },
            'cluster_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6804'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6805'
                }]
            },
            'heartbeat_back_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6808'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6809'
                }]
            },
            'heartbeat_front_addrs': {
                'addrvec': [{
                    'type': 'v2',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6806'
                }, {
                    'type': 'v1',
                    'nonce': 1302,
                    'addr': '172.23.0.2:6807'
                }]
            },
            'state': ['exists', 'up'],
            'uuid': str(uuid.uuid4()),
            'public_addr': '172.23.0.2:6803/1302',
            'cluster_addr': '172.23.0.2:6805/1302',
            'heartbeat_back_addr': '172.23.0.2:6809/1302',
            'heartbeat_front_addr': '172.23.0.2:6807/1302',
            'id': osd_id,
        }

    @classmethod
    def gen_osdmap(cls, ids: Optional[List[int]] = None) -> Dict[str, Any]:
        """Return an osdmap dict keyed by the (stringified) osd id."""
        return {str(i): cls._gen_osd_map_osd(i) for i in ids or cls.DEFAULT_OSD_IDS}

    @classmethod
    def gen_osd_stats(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """Return a list of ``osd_stats`` entries, one per id."""
        return [cls._gen_osd_stats(i) for i in ids or cls.DEFAULT_OSD_IDS]

    @classmethod
    def gen_osdmap_tree_nodes(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """Return a flat CRUSH tree: one root, one host, then the osd leaves."""
        return [
            cls._gen_osdmap_tree_node(-1, 'root', [-3]),
            cls._gen_osdmap_tree_node(-3, 'host', ids or cls.DEFAULT_OSD_IDS),
        ] + [cls._gen_osdmap_tree_node(node_id, 'osd') for node_id in ids or cls.DEFAULT_OSD_IDS]

    @classmethod
    def gen_mgr_get_counter(cls) -> List[List[int]]:
        """Return a fixed (timestamp, value) perf-counter series."""
        return [[1551973855, 35], [1551973860, 35], [1551973865, 35], [1551973870, 35]]

    @staticmethod
    def mock_inventory_host(orch_client_mock, devices_data: List[Dict[str, str]]) -> None:
        """Wire ``orch_client_mock.inventory.list`` to return one inventory
        host per distinct ``'host'`` value found in ``devices_data``.

        Each ``devices_data`` entry needs ``'host'``, ``'type'`` and
        ``'path'`` keys.
        """
        class MockDevice:
            def __init__(self, human_readable_type, path, available=True):
                self.human_readable_type = human_readable_type
                self.available = available
                self.path = path

        # NOTE: renamed from the original misspelling 'create_invetory_host'.
        def create_inventory_host(host, devices_data):
            inventory_host = mock.Mock()
            inventory_host.devices.devices = []
            for data in devices_data:
                if data['host'] != host:
                    continue
                inventory_host.devices.devices.append(MockDevice(data['type'], data['path']))
            return inventory_host

        hosts = {device['host'] for device in devices_data}

        inventory = [create_inventory_host(host, devices_data) for host in hosts]
        orch_client_mock.inventory.list.return_value = inventory
214
11fdf7f2
TL
215
216class OsdTest(ControllerTestCase):
    @classmethod
    def setup_server(cls):
        # Mount the REST (Osd) and UI (OsdUi) controllers on the test server.
        cls.setup_controllers([Osd, OsdUi])
        # OSD operations run as background tasks; the notification queue must
        # be running before the task manager is initialized.
        NotificationQueue.start_queue()
        TaskManager.init()
222
    @classmethod
    def tearDownClass(cls):
        # Stop the queue thread started in setup_server() so the test
        # process can shut down cleanly.
        NotificationQueue.stop()
11fdf7f2
TL
226
227 @contextmanager
228 def _mock_osd_list(self, osd_stat_ids, osdmap_tree_node_ids, osdmap_ids):
229 def mgr_get_replacement(*args, **kwargs):
230 method = args[0] or kwargs['method']
231 if method == 'osd_stats':
232 return {'osd_stats': OsdHelper.gen_osd_stats(osd_stat_ids)}
233 if method == 'osd_map_tree':
234 return {'nodes': OsdHelper.gen_osdmap_tree_nodes(osdmap_tree_node_ids)}
235 raise NotImplementedError()
236
237 def mgr_get_counter_replacement(svc_type, _, path):
238 if svc_type == 'osd':
239 return {path: OsdHelper.gen_mgr_get_counter()}
240 raise NotImplementedError()
241
9f95a23c
TL
242 with mock.patch.object(Osd, 'get_osd_map', return_value=OsdHelper.gen_osdmap(osdmap_ids)):
243 with mock.patch.object(mgr, 'get', side_effect=mgr_get_replacement):
244 with mock.patch.object(mgr, 'get_counter', side_effect=mgr_get_counter_replacement):
245 with mock.patch.object(mgr, 'get_latest', return_value=1146609664):
f67539c2
TL
246 with mock.patch.object(Osd, 'get_removing_osds', return_value=[]):
247 yield
248
249 def _get_drive_group_data(self, service_id='all_hdd', host_pattern_k='host_pattern',
250 host_pattern_v='*'):
251 return {
252 'method': 'drive_groups',
253 'data': [
254 {
255 'service_type': 'osd',
256 'service_id': service_id,
257 'data_devices': {
258 'rotational': True
259 },
260 host_pattern_k: host_pattern_v
261 }
262 ],
263 'tracking_id': 'all_hdd, b_ssd'
264 }
11fdf7f2
TL
265
266 def test_osd_list_aggregation(self):
267 """
268 This test emulates the state of a cluster where an OSD has only been
269 removed (with e.g. `ceph osd rm`), but it hasn't been removed from the
270 CRUSH map. Ceph reports a health warning alongside a `1 osds exist in
271 the crush map but not in the osdmap` warning in such a case.
272 """
273 osds_actual = [0, 1]
274 osds_leftover = [0, 1, 2]
275 with self._mock_osd_list(osd_stat_ids=osds_actual, osdmap_tree_node_ids=osds_leftover,
276 osdmap_ids=osds_actual):
277 self._get('/api/osd')
9f95a23c 278 self.assertEqual(len(self.json_body()), 2, 'It should display two OSDs without failure')
11fdf7f2 279 self.assertStatus(200)
9f95a23c 280
20effc67
TL
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_osd_scrub(self, ceph_service):
        # 'deep': True must translate into the 'osd deep-scrub' mon command.
        self._task_post('/api/osd/1/scrub', {'deep': True})
        ceph_service.send_command.assert_called_once_with('mon', 'osd deep-scrub', who='1')
        self.assertStatus(200)
        self.assertJsonBody(None)
287
9f95a23c
TL
288 @mock.patch('dashboard.controllers.osd.CephService')
289 def test_osd_create_bare(self, ceph_service):
290 ceph_service.send_command.return_value = '5'
f67539c2
TL
291 sample_data = {
292 'uuid': 'f860ca2e-757d-48ce-b74a-87052cad563f',
293 'svc_id': 5
294 }
295
9f95a23c
TL
296 data = {
297 'method': 'bare',
f67539c2 298 'data': sample_data,
9f95a23c
TL
299 'tracking_id': 'bare-5'
300 }
301 self._task_post('/api/osd', data)
302 self.assertStatus(201)
303 ceph_service.send_command.assert_called()
304
20effc67
TL
305 # unknown method
306 data['method'] = 'other'
307 self._task_post('/api/osd', data)
308 self.assertStatus(400)
309 res = self.json_body()
310 self.assertIn('Unknown method', res['detail'])
311
312 # svc_id is not int
313 data['data']['svc_id'] = "five"
314 data['method'] = 'bare'
315 self._task_post('/api/osd', data)
316 self.assertStatus(400)
317 res = self.json_body()
318 self.assertIn(data['data']['svc_id'], res['detail'])
319
9f95a23c
TL
320 @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
321 def test_osd_create_with_drive_groups(self, instance):
322 # without orchestrator service
323 fake_client = mock.Mock()
324 instance.return_value = fake_client
325
326 # Valid DriveGroup
f67539c2 327 data = self._get_drive_group_data()
9f95a23c
TL
328
329 # Without orchestrator service
330 fake_client.available.return_value = False
331 self._task_post('/api/osd', data)
332 self.assertStatus(503)
333
334 # With orchestrator service
335 fake_client.available.return_value = True
f67539c2 336 fake_client.get_missing_features.return_value = []
9f95a23c
TL
337 self._task_post('/api/osd', data)
338 self.assertStatus(201)
339 dg_specs = [DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
340 service_id='all_hdd',
341 service_type='osd',
342 data_devices=DeviceSelection(rotational=True))]
343 fake_client.osds.create.assert_called_with(dg_specs)
344
345 @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
346 def test_osd_create_with_invalid_drive_groups(self, instance):
347 # without orchestrator service
348 fake_client = mock.Mock()
349 instance.return_value = fake_client
f67539c2 350 fake_client.get_missing_features.return_value = []
9f95a23c
TL
351
352 # Invalid DriveGroup
f67539c2 353 data = self._get_drive_group_data('invalid_dg', 'host_pattern_wrong', 'unknown')
9f95a23c
TL
354 self._task_post('/api/osd', data)
355 self.assertStatus(400)
f67539c2
TL
356
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_osd_mark_all_actions(self, instance):
        fake_client = mock.Mock()
        instance.return_value = fake_client
        action_list = ['OUT', 'IN', 'DOWN']

        # Every known mark action must be accepted with 200.
        for action in action_list:
            data = {'action': action}
            self._task_put('/api/osd/1/mark', data)
            self.assertStatus(200)

        # An unknown action is only logged: no mon command is sent, but the
        # endpoint still answers 200 with an empty body.
        instance.reset_mock()
        with self.assertLogs(level='ERROR') as cm:
            self._task_put('/api/osd/1/mark', {'action': 'OTHER'})
            instance.send_command.assert_not_called()
            self.assertIn('Invalid OSD mark action', cm.output[0])
        self.assertStatus(200)
        self.assertJsonBody(None)

        # Purge must always pass yes_i_really_mean_it to the mon command.
        self._task_post('/api/osd/1/purge', {'svc_id': 1})
        instance.send_command.assert_called_once_with('mon', 'osd purge-actual', id=1,
                                                      yes_i_really_mean_it=True)
        self.assertStatus(200)
        self.assertJsonBody(None)
382
    @mock.patch('dashboard.controllers.osd.CephService')
    def test_reweight_osd(self, instance):
        instance.send_command.return_value = '5'
        uuid1 = str(uuid.uuid1())
        # Create a bare OSD first so there is something to mark and reweight.
        sample_data = {
            'uuid': uuid1,
            'svc_id': 1
        }
        data = {
            'method': 'bare',
            'data': sample_data,
            'tracking_id': 'bare-1'
        }
        self._task_post('/api/osd', data)
        self._task_put('/api/osd/1/mark', {'action': 'DOWN'})
        self.assertStatus(200)
        # The string weight from the request must reach the mon command
        # converted to a float.
        self._task_post('/api/osd/1/reweight', {'weight': '1'})
        instance.send_command.assert_called_with('mon', 'osd reweight', id=1, weight=1.0)
        self.assertStatus(200)
2a845540
TL
402
403 def _get_deployment_options(self, fake_client, devices_data: Dict[str, str]) -> Dict[str, Any]:
404 OsdHelper.mock_inventory_host(fake_client, devices_data)
405 self._get('/ui-api/osd/deployment_options')
406 self.assertStatus(200)
407 res = self.json_body()
408 return res
409
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options(self, instance):
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []

        # Inventory containing HDDs only -> cost/capacity is available and
        # recommended.
        devices_data = [
            {'type': 'hdd', 'path': '/dev/sda', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdc', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdb', 'host': 'host2'},
            {'type': 'hdd', 'path': '/dev/sde', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdd', 'host': 'host2'},
        ]

        res = self._get_deployment_options(fake_client, devices_data)
        self.assertTrue(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        assert res['recommended_option'] == OsdDeploymentOptions.COST_CAPACITY

        # we don't want cost_capacity enabled without hdds
        for data in devices_data:
            data['type'] = 'ssd'

        res = self._get_deployment_options(fake_client, devices_data)
        self.assertFalse(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertEqual(res['recommended_option'], None)
436
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options_throughput(self, instance):
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []

        # Mixed SSD + HDD inventory (non-NVMe SSD paths) -> both
        # cost/capacity and throughput available, throughput recommended.
        devices_data = [
            {'type': 'ssd', 'path': '/dev/sda', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/sdc', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/sdb', 'host': 'host2'},
            {'type': 'hdd', 'path': '/dev/sde', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdd', 'host': 'host2'},
        ]

        res = self._get_deployment_options(fake_client, devices_data)
        self.assertTrue(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertTrue(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.IOPS]['available'])
        assert res['recommended_option'] == OsdDeploymentOptions.THROUGHPUT
456
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options_with_hdds_and_nvmes(self, instance):
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []

        # SSDs on NVMe device paths count toward IOPS rather than
        # throughput; with HDDs also present, cost/capacity is recommended.
        devices_data = [
            {'type': 'ssd', 'path': '/dev/nvme01', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme02', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme03', 'host': 'host2'},
            {'type': 'hdd', 'path': '/dev/sde', 'host': 'host1'},
            {'type': 'hdd', 'path': '/dev/sdd', 'host': 'host2'},
        ]

        res = self._get_deployment_options(fake_client, devices_data)
        self.assertTrue(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertTrue(res['options'][OsdDeploymentOptions.IOPS]['available'])
        assert res['recommended_option'] == OsdDeploymentOptions.COST_CAPACITY
476
    @mock.patch('dashboard.controllers.orchestrator.OrchClient.instance')
    def test_deployment_options_iops(self, instance):
        fake_client = mock.Mock()
        instance.return_value = fake_client
        fake_client.get_missing_features.return_value = []

        # NVMe-only inventory -> only the IOPS option is available.
        devices_data = [
            {'type': 'ssd', 'path': '/dev/nvme01', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme02', 'host': 'host1'},
            {'type': 'ssd', 'path': '/dev/nvme03', 'host': 'host2'}
        ]

        res = self._get_deployment_options(fake_client, devices_data)
        self.assertFalse(res['options'][OsdDeploymentOptions.COST_CAPACITY]['available'])
        self.assertFalse(res['options'][OsdDeploymentOptions.THROUGHPUT]['available'])
        self.assertTrue(res['options'][OsdDeploymentOptions.IOPS]['available'])