# -*- coding: utf-8 -*-

import json
import logging
import time
from typing import Any, Dict, List, Optional, Union

from ceph.deployment.drive_group import DriveGroupSpec, DriveGroupValidationError  # type: ignore
from mgr_util import get_most_recent_rate

from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService, SendCommandError
from ..services.exception import handle_orchestrator_error, handle_send_command_error
from ..services.orchestrator import OrchClient, OrchFeature
from ..services.osd import HostStorageSummary, OsdDeploymentOptions
from ..tools import str_to_bool
from . import APIDoc, APIRouter, CreatePermission, DeletePermission, Endpoint, \
    EndpointDoc, ReadPermission, RESTController, Task, UIRouter, \
    UpdatePermission, allow_empty_body
from ._version import APIVersion
from .orchestrator import raise_if_no_orchestrator

logger = logging.getLogger('controllers.osd')

SAFE_TO_DESTROY_SCHEMA = {
    "safe_to_destroy": ([str], "IDs of OSDs that are safe to destroy"),
    "active": ([int], "IDs of OSDs that are still active"),
    "missing_stats": ([str], "IDs of OSDs whose PG stats are missing"),
    "stored_pgs": ([str], "Stored placement groups in the OSD"),
    "is_safe_to_destroy": (bool, "Is OSD safe to destroy?")
}

EXPORT_FLAGS_SCHEMA = {
    "list_of_flags": ([str], "List of currently set OSD flags")
}

EXPORT_INDIV_FLAGS_SCHEMA = {
    "added": ([str], "List of added flags"),
    "removed": ([str], "List of removed flags"),
    "ids": ([int], "List of updated OSDs")
}

EXPORT_INDIV_FLAGS_GET_SCHEMA = {
    "osd": (int, "OSD ID"),
    "flags": ([str], "List of active flags")
}
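
# Illustrative only: a response described by SAFE_TO_DESTROY_SCHEMA might look
# like {'safe_to_destroy': [0, 1], 'active': [], 'missing_stats': [],
# 'stored_pgs': [], 'is_safe_to_destroy': True}, while
# EXPORT_INDIV_FLAGS_GET_SCHEMA describes entries such as
# {'osd': 0, 'flags': ['exists', 'up']}.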


class DeploymentOptions:
    def __init__(self):
        self.options = {
            OsdDeploymentOptions.COST_CAPACITY:
            HostStorageSummary(OsdDeploymentOptions.COST_CAPACITY,
                               title='Cost/Capacity-optimized',
                               desc='All the available HDDs are selected'),
            OsdDeploymentOptions.THROUGHPUT:
            HostStorageSummary(OsdDeploymentOptions.THROUGHPUT,
                               title='Throughput-optimized',
                               desc="HDDs/SSDs are selected for data "
                                    "devices and SSDs/NVMes for DB/WAL devices"),
            OsdDeploymentOptions.IOPS:
            HostStorageSummary(OsdDeploymentOptions.IOPS,
                               title='IOPS-optimized',
                               desc='All the available NVMes are selected'),
        }
        self.recommended_option = None

    def as_dict(self):
        return {
            'options': {k: v.as_dict() for k, v in self.options.items()},
            'recommended_option': self.recommended_option
        }
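
    # Illustrative only: as_dict() produces
    # {'options': {<OsdDeploymentOptions value>: <HostStorageSummary.as_dict()>, ...},
    #  'recommended_option': <OsdDeploymentOptions value or None>}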


predefined_drive_groups = {
    OsdDeploymentOptions.COST_CAPACITY: {
        'service_type': 'osd',
        'service_id': 'cost_capacity',
        'placement': {
            'host_pattern': '*'
        },
        'data_devices': {
            'rotational': 1
        },
        'encrypted': False
    },
    OsdDeploymentOptions.THROUGHPUT: {
        'service_type': 'osd',
        'service_id': 'throughput_optimized',
        'placement': {
            'host_pattern': '*'
        },
        'data_devices': {
            'rotational': 1
        },
        'db_devices': {
            'rotational': 0
        },
        'encrypted': False
    },
    OsdDeploymentOptions.IOPS: {
        'service_type': 'osd',
        'service_id': 'iops_optimized',
        'placement': {
            'host_pattern': '*'
        },
        'data_devices': {
            'rotational': 0
        },
        'encrypted': False
    },
}
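
# Illustrative only: each entry above is a plain-dict OSD service spec. It is
# consumed via DriveGroupSpec.from_json(predefined_drive_groups[option]) in
# _create_predefined_drive_group() below, much like an `osd` service spec
# loaded from a YAML file would be.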


def osd_task(name, metadata, wait_for=2.0):
    return Task("osd/{}".format(name), metadata, wait_for)


@APIRouter('/osd', Scope.OSD)
@APIDoc('OSD management API', 'OSD')
class Osd(RESTController):
    def list(self):
        osds = self.get_osd_map()

        # Extend with OSD stats information
        for stat in mgr.get('osd_stats')['osd_stats']:
            if stat['osd'] in osds:
                osds[stat['osd']]['osd_stats'] = stat

        # Extend with OSD node information
        nodes = mgr.get('osd_map_tree')['nodes']
        for node in nodes:
            if node['type'] == 'osd' and node['id'] in osds:
                osds[node['id']]['tree'] = node

        # Extend with OSD parent node information
        for host in [n for n in nodes if n['type'] == 'host']:
            for osd_id in host['children']:
                if osd_id >= 0 and osd_id in osds:
                    osds[osd_id]['host'] = host

        removing_osd_ids = self.get_removing_osds()

        # Extend with OSD histogram (stats history) and orchestrator data
        for osd_id, osd in osds.items():
            osd['stats'] = {}
            osd['stats_history'] = {}
            osd_spec = str(osd_id)
            if 'osd' not in osd:
                continue  # pragma: no cover - simple early continue
            self.gauge_stats(osd, osd_spec)
            osd['operational_status'] = self._get_operational_status(osd_id, removing_osd_ids)
        return list(osds.values())

    @staticmethod
    def gauge_stats(osd, osd_spec):
        # Rate (counter) stats: keep the most recent rate plus the rate history
        for stat in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
            prop = stat.split('.')[1]
            rates = CephService.get_rates('osd', osd_spec, stat)
            osd['stats'][prop] = get_most_recent_rate(rates)
            osd['stats_history'][prop] = rates
        # Gauge stats: only the latest value is of interest
        for stat in ['osd.numpg', 'osd.stat_bytes', 'osd.stat_bytes_used']:
            osd['stats'][stat.split('.')[1]] = mgr.get_latest('osd', osd_spec, stat)
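
    # Illustrative only: after gauge_stats(), osd['stats'] holds e.g.
    # {'op_w': ..., 'op_in_bytes': ..., 'op_r': ..., 'op_out_bytes': ...,
    #  'numpg': ..., 'stat_bytes': ..., 'stat_bytes_used': ...}, and
    # osd['stats_history'] holds the per-counter rate series.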

    @RESTController.Collection('GET', version=APIVersion.EXPERIMENTAL)
    @ReadPermission
    def settings(self):
        data = {
            'nearfull_ratio': -1,
            'full_ratio': -1
        }
        try:
            result = CephService.send_command('mon', 'osd dump')
            data['nearfull_ratio'] = result['nearfull_ratio']
            data['full_ratio'] = result['full_ratio']
        except TypeError:
            logger.error(
                'Error retrieving nearfull_ratio and full_ratio:', exc_info=True)
        return data
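
    # Illustrative only: settings() returns e.g.
    # {'nearfull_ratio': 0.85, 'full_ratio': 0.95} (the Ceph defaults),
    # falling back to -1 for both values if the `osd dump` output cannot
    # be read.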

    def _get_operational_status(self, osd_id: int, removing_osd_ids: Optional[List[int]]):
        if removing_osd_ids is None:
            return 'unmanaged'
        if osd_id in removing_osd_ids:
            return 'deleting'
        return 'working'

    @staticmethod
    def get_removing_osds() -> Optional[List[int]]:
        orch = OrchClient.instance()
        if orch.available(features=[OrchFeature.OSD_GET_REMOVE_STATUS]):
            return [osd.osd_id for osd in orch.osds.removing_status()]
        return None

    @staticmethod
    def get_osd_map(svc_id=None):
        # type: (Union[int, None]) -> Dict[int, Union[dict, Any]]
        def add_id(osd):
            osd['id'] = osd['osd']
            return osd

        resp = {
            osd['osd']: add_id(osd)
            for osd in mgr.get('osd_map')['osds'] if svc_id is None or osd['osd'] == int(svc_id)
        }
        return resp if svc_id is None else resp[int(svc_id)]

    @staticmethod
    def _get_smart_data(osd_id):
        # type: (str) -> dict
        """Returns S.M.A.R.T. data for the given OSD ID."""
        logger.debug('[SMART] retrieving data from OSD with ID %s', osd_id)
        return CephService.get_smart_data_by_daemon('osd', osd_id)

    @RESTController.Resource('GET')
    def smart(self, svc_id):
        # type: (str) -> dict
        return self._get_smart_data(svc_id)

    @handle_send_command_error('osd')
    def get(self, svc_id):
        """
        Returns collected data about an OSD.

        :return: Returns the requested data.
        """
        return {
            'osd_map': self.get_osd_map(svc_id),
            'osd_metadata': mgr.get_metadata('osd', svc_id),
            'operational_status': self._get_operational_status(int(svc_id),
                                                               self.get_removing_osds())
        }
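
    # Illustrative only: get() responds with
    # {'osd_map': {...}, 'osd_metadata': {...},
    #  'operational_status': 'unmanaged' | 'working' | 'deleting'}.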

    @RESTController.Resource('GET')
    @handle_send_command_error('osd')
    def histogram(self, svc_id):
        # type: (int) -> Dict[str, Any]
        """
        :return: Returns the histogram data.
        """
        try:
            histogram = CephService.send_command(
                'osd', srv_spec=svc_id, prefix='perf histogram dump')
        except SendCommandError as e:  # pragma: no cover - the handling is too obvious
            raise DashboardException(
                component='osd', http_status_code=400, msg=str(e))

        return histogram

    def set(self, svc_id, device_class):  # pragma: no cover
        old_device_class = CephService.send_command('mon', 'osd crush get-device-class',
                                                    ids=[svc_id])
        old_device_class = old_device_class[0]['device_class']
        if old_device_class != device_class:
            CephService.send_command('mon', 'osd crush rm-device-class',
                                     ids=[svc_id])
            if device_class:
                CephService.send_command('mon', 'osd crush set-device-class', **{
                    'class': device_class,
                    'ids': [svc_id]
                })

    def _check_delete(self, osd_ids):
        # type: (List[str]) -> Dict[str, Any]
        """
        Check if it's safe to remove OSD(s).

        :param osd_ids: list of OSD IDs
        :return: a dictionary containing the following attributes:
            `safe`: bool, indicates if it's safe to remove the OSDs.
            `message`: str, help message if it's not safe to remove the OSDs.
        """
        _ = osd_ids
        health_data = mgr.get('health')  # type: ignore
        health = json.loads(health_data['json'])
        checks = health['checks'].keys()
        unsafe_checks = set(['OSD_FULL', 'OSD_BACKFILLFULL', 'OSD_NEARFULL'])
        failed_checks = checks & unsafe_checks
        msg = 'Removing OSD(s) is not recommended because of these failed health check(s): {}.'.\
            format(', '.join(failed_checks)) if failed_checks else ''
        return {
            'safe': not bool(failed_checks),
            'message': msg
        }
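
    # Illustrative only: _check_delete() returns e.g.
    # {'safe': False, 'message': 'Removing OSD(s) is not recommended because '
    #                            'of these failed health check(s): OSD_NEARFULL.'}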

    @DeletePermission
    @raise_if_no_orchestrator([OrchFeature.OSD_DELETE, OrchFeature.OSD_GET_REMOVE_STATUS])
    @handle_orchestrator_error('osd')
    @osd_task('delete', {'svc_id': '{svc_id}'})
    def delete(self, svc_id, preserve_id=None, force=None):  # pragma: no cover
        replace = False
        check: Union[Dict[str, Any], bool] = False
        try:
            if preserve_id is not None:
                replace = str_to_bool(preserve_id)
            if force is not None:
                check = not str_to_bool(force)
        except ValueError:
            raise DashboardException(
                component='osd', http_status_code=400, msg='Invalid parameter(s)')
        orch = OrchClient.instance()
        if check:
            logger.info('Check for removing osd.%s...', svc_id)
            check = self._check_delete([svc_id])
            if not check['safe']:
                logger.error('Unable to remove osd.%s: %s', svc_id, check['message'])
                raise DashboardException(component='osd', msg=check['message'])

        logger.info('Start removing osd.%s (replace: %s)...', svc_id, replace)
        orch.osds.remove([svc_id], replace)
        while True:
            removal_osds = orch.osds.removing_status()
            logger.info('Current removing OSDs %s', removal_osds)
            pending = [osd for osd in removal_osds if osd.osd_id == int(svc_id)]
            if not pending:
                break
            logger.info('Wait until osd.%s is removed...', svc_id)
            time.sleep(60)

    @RESTController.Resource('POST', query_params=['deep'])
    @UpdatePermission
    @allow_empty_body
    def scrub(self, svc_id, deep=False):
        api_scrub = "osd deep-scrub" if str_to_bool(deep) else "osd scrub"
        CephService.send_command("mon", api_scrub, who=svc_id)

    @RESTController.Resource('PUT')
    @EndpointDoc("Mark OSD flags (out, in, down, lost, ...)",
                 parameters={'svc_id': (str, 'SVC ID')})
    def mark(self, svc_id, action):
        """
        Note: the OSD must be marked `down` before it can be marked `lost`.
        """
        valid_actions = ['out', 'in', 'down', 'lost']
        args = {'srv_type': 'mon', 'prefix': 'osd ' + action}
        if action.lower() in valid_actions:
            if action == 'lost':
                args['id'] = int(svc_id)
                args['yes_i_really_mean_it'] = True
            else:
                args['ids'] = [svc_id]

            CephService.send_command(**args)
        else:
            logger.error("Invalid OSD mark action: %s attempted on SVC_ID: %s", action, svc_id)
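
    # Illustrative only: mark('3', 'out') sends the equivalent of
    # `ceph osd out 3`, while mark('3', 'lost') sends the equivalent of
    # `ceph osd lost 3 --yes-i-really-mean-it`.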

    @RESTController.Resource('POST')
    @allow_empty_body
    def reweight(self, svc_id, weight):
        """
        Reweights the OSD temporarily.

        Note that ‘ceph osd reweight’ is not a persistent setting. When an OSD
        gets marked out, the osd weight will be set to 0. When it gets marked
        in again, the weight will be changed to 1.

        Because of this ‘ceph osd reweight’ is a temporary solution. You should
        only use it to keep your cluster running while you’re ordering more
        hardware.

        - Craig Lewis (http://lists.ceph.com/pipermail/ceph-users-ceph.com/2014-June/040967.html)
        """
        CephService.send_command(
            'mon',
            'osd reweight',
            id=int(svc_id),
            weight=float(weight))
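
    # Illustrative only: reweight('0', '0.8') issues the equivalent of
    # `ceph osd reweight 0 0.8`, i.e. a temporary override weight in [0, 1],
    # as opposed to the persistent CRUSH weight.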

    def _create_predefined_drive_group(self, data):
        orch = OrchClient.instance()
        option = OsdDeploymentOptions(data[0]['option'])
        if option in list(OsdDeploymentOptions):
            try:
                predefined_drive_groups[
                    option]['encrypted'] = data[0]['encrypted']
                orch.osds.create([DriveGroupSpec.from_json(
                    predefined_drive_groups[option])])
            except (ValueError, TypeError, KeyError, DriveGroupValidationError) as e:
                raise DashboardException(e, component='osd')

    def _create_bare(self, data):
        """Create an OSD container that has no associated device.

        :param data: contains the attributes to create a bare OSD.
        : `uuid`: will be set automatically if the OSD starts up
        : `svc_id`: the ID is only used if a valid uuid is given.
        """
        try:
            uuid = data['uuid']
            svc_id = int(data['svc_id'])
        except (KeyError, ValueError) as e:
            raise DashboardException(e, component='osd', http_status_code=400)

        result = CephService.send_command(
            'mon', 'osd create', id=svc_id, uuid=uuid)
        return {
            'result': result,
            'svc_id': svc_id,
            'uuid': uuid,
        }
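
    # Illustrative only: _create_bare() expects a payload such as
    # {'uuid': '5032e0d2-...', 'svc_id': 7} and echoes the outcome back as
    # {'result': ..., 'svc_id': 7, 'uuid': '5032e0d2-...'}.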

    @raise_if_no_orchestrator([OrchFeature.OSD_CREATE])
    @handle_orchestrator_error('osd')
    def _create_with_drive_groups(self, drive_groups):
        """Create OSDs with DriveGroups."""
        orch = OrchClient.instance()
        try:
            dg_specs = [DriveGroupSpec.from_json(dg) for dg in drive_groups]
            orch.osds.create(dg_specs)
        except (ValueError, TypeError, DriveGroupValidationError) as e:
            raise DashboardException(e, component='osd')

    @CreatePermission
    @osd_task('create', {'tracking_id': '{tracking_id}'})
    def create(self, method, data, tracking_id):  # pylint: disable=unused-argument
        if method == 'bare':
            return self._create_bare(data)
        if method == 'drive_groups':
            return self._create_with_drive_groups(data)
        if method == 'predefined':
            return self._create_predefined_drive_group(data)
        raise DashboardException(
            component='osd', http_status_code=400, msg='Unknown method: {}'.format(method))

    @RESTController.Resource('POST')
    @allow_empty_body
    def purge(self, svc_id):
        """
        Note: the OSD must be marked `down` before removal.
        """
        CephService.send_command('mon', 'osd purge-actual', id=int(svc_id),
                                 yes_i_really_mean_it=True)

    @RESTController.Resource('POST')
    @allow_empty_body
    def destroy(self, svc_id):
        """
        Mark an OSD as being destroyed. Keeps the ID intact (allowing reuse), but
        removes cephx keys, config-key data and lockbox keys, rendering the data
        permanently unreadable.

        The OSD must be marked down before being destroyed.
        """
        CephService.send_command(
            'mon', 'osd destroy-actual', id=int(svc_id), yes_i_really_mean_it=True)

    @Endpoint('GET', query_params=['ids'])
    @ReadPermission
    @EndpointDoc("Check If OSD is Safe to Destroy",
                 parameters={
                     'ids': (str, 'OSD Service Identifier'),
                 },
                 responses={200: SAFE_TO_DESTROY_SCHEMA})
    def safe_to_destroy(self, ids):
        """
        :type ids: int|[int]
        """
        ids = json.loads(ids)
        if isinstance(ids, list):
            ids = list(map(str, ids))
        else:
            ids = [str(ids)]

        try:
            result = CephService.send_command(
                'mon', 'osd safe-to-destroy', ids=ids, target=('mgr', ''))
            result['is_safe_to_destroy'] = set(result['safe_to_destroy']) == set(map(int, ids))
            return result

        except SendCommandError as e:
            return {
                'message': str(e),
                'is_safe_to_destroy': False,
            }
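
    # Illustrative only: a request like GET .../osd/safe_to_destroy?ids=[0,1]
    # returns the raw `osd safe-to-destroy` result plus the aggregated flag,
    # e.g. {'safe_to_destroy': [0, 1], 'active': [], 'missing_stats': [],
    #       'stored_pgs': [], 'is_safe_to_destroy': True}.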

    @Endpoint('GET', query_params=['svc_ids'])
    @ReadPermission
    @raise_if_no_orchestrator()
    @handle_orchestrator_error('osd')
    def safe_to_delete(self, svc_ids):
        """
        :type svc_ids: int|[int]
        """
        check = self._check_delete(svc_ids)
        return {
            'is_safe_to_delete': check.get('safe', False),
            'message': check.get('message', '')
        }

    @RESTController.Resource('GET')
    def devices(self, svc_id):
        # type: (str) -> Union[list, str]
        devices: Union[list, str] = CephService.send_command(
            'mon', 'device ls-by-daemon', who='osd.{}'.format(svc_id))
        mgr_map = mgr.get('mgr_map')
        available_modules = [m['name'] for m in mgr_map['available_modules']]

        life_expectancy_enabled = any(
            item.startswith('diskprediction_') for item in available_modules)
        for device in devices:
            device['life_expectancy_enabled'] = life_expectancy_enabled

        return devices


@UIRouter('/osd', Scope.OSD)
@APIDoc("Dashboard UI helper function; not part of the public API", "OsdUI")
class OsdUi(Osd):
    @Endpoint('GET')
    @ReadPermission
    @raise_if_no_orchestrator([OrchFeature.DAEMON_LIST])
    @handle_orchestrator_error('host')
    def deployment_options(self):
        orch = OrchClient.instance()
        hdds = 0
        ssds = 0
        nvmes = 0
        res = DeploymentOptions()

        for inventory_host in orch.inventory.list(hosts=None, refresh=True):
            for device in inventory_host.devices.devices:
                if device.available:
                    if device.human_readable_type == 'hdd':
                        hdds += 1
                    # SSDs and NVMes are both reported as 'ssd',
                    # so NVMes are differentiated by their device path
                    elif '/dev/nvme' in device.path:
                        nvmes += 1
                    else:
                        ssds += 1

        if hdds:
            res.options[OsdDeploymentOptions.COST_CAPACITY].available = True
            res.recommended_option = OsdDeploymentOptions.COST_CAPACITY
        if hdds and ssds:
            res.options[OsdDeploymentOptions.THROUGHPUT].available = True
            res.recommended_option = OsdDeploymentOptions.THROUGHPUT
        if nvmes:
            res.options[OsdDeploymentOptions.IOPS].available = True

        return res.as_dict()
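
    # Illustrative only: with 4 available HDDs and 2 SSDs in the inventory,
    # both the cost/capacity and throughput options become available, and the
    # recommendation settles on the throughput-optimized option because the
    # `hdds and ssds` check runs after the HDD-only check.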


@APIRouter('/osd/flags', Scope.OSD)
@APIDoc(group='OSD')
class OsdFlagsController(RESTController):
    @staticmethod
    def _osd_flags():
        enabled_flags = mgr.get('osd_map')['flags_set']
        if 'pauserd' in enabled_flags and 'pausewr' in enabled_flags:
            # 'pause' is set by calling `ceph osd set pause` and unset by
            # calling `ceph osd unset pause`, but `ceph osd dump | jq '.flags'`
            # will contain 'pauserd,pausewr' if pause is set.
            # Let's pretend to the API that 'pause' is in fact a proper flag.
            enabled_flags = list(
                set(enabled_flags) - {'pauserd', 'pausewr'} | {'pause'})
        return sorted(enabled_flags)
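
    # Illustrative only: if the OSD map reports
    # ['noout', 'pauserd', 'pausewr'], _osd_flags() collapses that to
    # ['noout', 'pause'].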

    @staticmethod
    def _update_flags(action, flags, ids=None):
        if ids:
            if flags:
                ids = list(map(str, ids))
                CephService.send_command('mon', 'osd ' + action, who=ids,
                                         flags=','.join(flags))
        else:
            for flag in flags:
                CephService.send_command('mon', 'osd ' + action, '', key=flag)

    @EndpointDoc("Display OSD Flags",
                 responses={200: EXPORT_FLAGS_SCHEMA})
    def list(self):
        return self._osd_flags()

    @EndpointDoc('Sets OSD flags for the entire cluster.',
                 parameters={
                     'flags': ([str], 'List of flags to set. The flags `recovery_deletes`, '
                                      '`sortbitwise` and `pglog_hardlimit` cannot be unset. '
                                      'Additionally `purged_snapshots` cannot even be set.')
                 },
                 responses={200: EXPORT_FLAGS_SCHEMA})
    def bulk_set(self, flags):
        """
        The `recovery_deletes`, `sortbitwise` and `pglog_hardlimit` flags cannot be unset.
        `purged_snapshots` cannot even be set. It is therefore required to at
        least include those four flags for a successful operation.
        """
        assert isinstance(flags, list)

        enabled_flags = set(self._osd_flags())
        data = set(flags)
        added = data - enabled_flags
        removed = enabled_flags - data

        self._update_flags('set', added)
        self._update_flags('unset', removed)

        logger.info('Changed OSD flags: added=%s removed=%s', added, removed)

        return sorted(enabled_flags - removed | added)
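
    # Illustrative only: with enabled_flags = {'noout', 'sortbitwise'} and a
    # request body flags = ['noup', 'sortbitwise'], bulk_set() sets 'noup',
    # unsets 'noout' and returns ['noup', 'sortbitwise'].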

    @Endpoint('PUT', 'individual')
    @UpdatePermission
    @EndpointDoc('Sets OSD flags for a subset of individual OSDs.',
                 parameters={
                     'flags': ({'noout': (bool, 'Sets/unsets `noout`', True, None),
                                'noin': (bool, 'Sets/unsets `noin`', True, None),
                                'noup': (bool, 'Sets/unsets `noup`', True, None),
                                'nodown': (bool, 'Sets/unsets `nodown`', True, None)},
                               'Dictionary of flags to set or unset. Only the flags '
                               '`noin`, `noout`, `noup` and `nodown` are taken '
                               'into account.'),
                     'ids': ([int], 'List of OSD ids the flags should be applied '
                                    'to.')
                 },
                 responses={200: EXPORT_INDIV_FLAGS_SCHEMA})
    def set_individual(self, flags, ids):
        """
        Updates flags (`noout`, `noin`, `nodown`, `noup`) for an individual
        subset of OSDs.
        """
        assert isinstance(flags, dict)
        assert isinstance(ids, list)
        assert all(isinstance(id, int) for id in ids)

        # These are the only flags that can be applied to an OSD individually.
        all_flags = {'noin', 'noout', 'nodown', 'noup'}
        added = set()
        removed = set()
        for flag, activated in flags.items():
            if flag in all_flags:
                if activated is not None:
                    if activated:
                        added.add(flag)
                    else:
                        removed.add(flag)

        self._update_flags('set-group', added, ids)
        self._update_flags('unset-group', removed, ids)

        logger.info('Changed individual OSD flags: added=%s removed=%s for ids=%s',
                    added, removed, ids)

        return {'added': sorted(added),
                'removed': sorted(removed),
                'ids': ids}
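
    # Illustrative only: set_individual({'noout': True, 'noup': False}, [0, 1])
    # issues the equivalent of `ceph osd set-group noout 0 1` and
    # `ceph osd unset-group noup 0 1`, then returns
    # {'added': ['noout'], 'removed': ['noup'], 'ids': [0, 1]}.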

    @Endpoint('GET', 'individual')
    @ReadPermission
    @EndpointDoc('Displays individual OSD flags',
                 responses={200: EXPORT_INDIV_FLAGS_GET_SCHEMA})
    def get_individual(self):
        osd_map = mgr.get('osd_map')['osds']
        resp = []

        for osd in osd_map:
            resp.append({
                'osd': osd['osd'],
                'flags': osd['state']
            })
        return resp