1 | # -*- coding: utf-8 -*- | |
2 | from __future__ import absolute_import | |
3 | ||
4 | import json | |
5 | import re | |
6 | ||
7 | from functools import partial | |
8 | ||
9 | import cherrypy | |
10 | ||
11 | import rbd | |
12 | ||
13 | from . import ApiController, Endpoint, Task, BaseController, ReadPermission, \ | |
14 | RESTController | |
15 | from .. import logger, mgr | |
16 | from ..security import Scope | |
17 | from ..services.ceph_service import CephService | |
18 | from ..tools import ViewCache | |
19 | from ..services.exception import handle_rados_error, handle_rbd_error, \ | |
20 | serialize_dashboard_exception | |
21 | ||
22 | ||
# pylint: disable=not-callable
def handle_rbd_mirror_error():
    """Composite decorator for rbd-mirroring endpoints.

    Translates both RADOS errors (scoped to the 'rbd-mirroring' component)
    and RBD errors raised by the wrapped function into dashboard error
    responses.
    """
    def composed_decorator(func):
        # innermost: RADOS error handling; outermost: RBD error handling
        return handle_rbd_error()(handle_rados_error('rbd-mirroring')(func))
    return composed_decorator
29 | ||
30 | ||
# pylint: disable=not-callable
def RbdMirroringTask(name, metadata, wait_for):
    """Decorator factory for long-running rbd-mirroring operations.

    Wraps the endpoint in mirror error handling and registers it as an
    asynchronous ``Task`` named ``rbd/mirroring/<name>``; exceptions are
    serialized with their HTTP status included.
    """
    def composed_decorator(func):
        serializer = partial(serialize_dashboard_exception,
                             include_http_status=True)
        task_decorator = Task("rbd/mirroring/{}".format(name), metadata,
                              wait_for, serializer)
        return task_decorator(handle_rbd_mirror_error()(func))
    return composed_decorator
38 | ||
39 | ||
def _rbd_call(pool_name, func, *args, **kwargs):
    """Open an IOCtx for *pool_name* and invoke ``func(ioctx, *args, **kwargs)``.

    The IOCtx is released on exit via the context manager. Returns whatever
    *func* returns — the original discarded it, so callers that did
    ``return _rbd_call(...)`` (e.g. RbdMirroringPoolMode.set) always got
    ``None``; propagating the result is backward-compatible.
    """
    with mgr.rados.open_ioctx(pool_name) as ioctx:
        return func(ioctx, *args, **kwargs)
43 | ||
44 | ||
@ViewCache()
def get_daemons_and_pools():  # pylint: disable=R0915
    """Collect rbd-mirror daemon records and per-pool mirroring statistics.

    Returns ``{'daemons': [...], 'pools': {...}}``. The result is memoized
    by ``@ViewCache`` until the cache is explicitly reset.
    """
    def get_daemons():
        # One record per rbd-mirror service instance in the cluster
        # service map, annotated with a derived health level.
        daemons = []
        for hostname, server in CephService.get_service_map('rbd-mirror').items():
            for service in server['services']:
                id = service['id']  # pylint: disable=W0622
                metadata = service['metadata']
                status = service['status'] or {}

                try:
                    # the daemon reports its status as a JSON blob under 'json'
                    status = json.loads(status['json'])
                except (ValueError, KeyError) as _:
                    status = {}

                instance_id = metadata['instance_id']
                if id == instance_id:
                    # new version that supports per-cluster leader elections
                    id = metadata['id']

                # extract per-daemon service data and health
                daemon = {
                    'id': id,
                    'instance_id': instance_id,
                    'version': metadata['ceph_version'],
                    'server_hostname': hostname,
                    'service': service,
                    'server': server,
                    'metadata': metadata,
                    'status': status
                }
                daemon = dict(daemon, **get_daemon_health(daemon))
                daemons.append(daemon)

        return sorted(daemons, key=lambda k: k['instance_id'])

    def get_daemon_health(daemon):
        # Fold the daemon's per-pool callouts into one health level:
        # any 'error' callout wins, then 'warning', otherwise OK.
        health = {
            'health_color': 'info',
            'health': 'Unknown'
        }
        for _, pool_data in daemon['status'].items():
            if (health['health'] != 'error'
                    and [k for k, v in pool_data.get('callouts', {}).items()
                         if v['level'] == 'error']):
                health = {
                    'health_color': 'error',
                    'health': 'Error'
                }
            elif (health['health'] != 'error'
                  and [k for k, v in pool_data.get('callouts', {}).items()
                       if v['level'] == 'warning']):
                health = {
                    'health_color': 'warning',
                    'health': 'Warning'
                }
            elif health['health_color'] == 'info':
                health = {
                    'health_color': 'success',
                    'health': 'OK'
                }
        return health

    def get_pools(daemons):  # pylint: disable=R0912, R0915
        # Only replicated pools (type == 1) are considered for mirroring.
        pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
                      if pool.get('type', 1) == 1]
        pool_stats = {}
        rbdctx = rbd.RBD()
        for pool_name in pool_names:
            logger.debug("Constructing IOCtx %s", pool_name)
            try:
                ioctx = mgr.rados.open_ioctx(pool_name)
            except TypeError:
                logger.exception("Failed to open pool %s", pool_name)
                continue

            try:
                mirror_mode = rbdctx.mirror_mode_get(ioctx)
                peer_uuids = [x['uuid'] for x in rbdctx.mirror_peer_list(ioctx)]
            except:  # noqa pylint: disable=W0702
                # best-effort: pool ends up in the 'unknown'/warning bucket
                logger.exception("Failed to query mirror settings %s", pool_name)
                mirror_mode = None
                peer_uuids = []

            stats = {}
            if mirror_mode == rbd.RBD_MIRROR_MODE_DISABLED:
                mirror_mode = "disabled"
                stats['health_color'] = "info"
                stats['health'] = "Disabled"
            elif mirror_mode == rbd.RBD_MIRROR_MODE_IMAGE:
                mirror_mode = "image"
            elif mirror_mode == rbd.RBD_MIRROR_MODE_POOL:
                mirror_mode = "pool"
            else:
                mirror_mode = "unknown"
                stats['health_color'] = "warning"
                stats['health'] = "Warning"

            pool_stats[pool_name] = dict(stats, **{
                'mirror_mode': mirror_mode,
                'peer_uuids': peer_uuids
            })

        # Merge per-daemon pool status into the pool stats.
        for daemon in daemons:
            for _, pool_data in daemon['status'].items():
                stats = pool_stats.get(pool_data['name'], None)
                if stats is None:
                    continue

                if pool_data.get('leader', False):
                    # leader instance stores image counts
                    stats['leader_id'] = daemon['metadata']['instance_id']
                    stats['image_local_count'] = pool_data.get('image_local_count', 0)
                    stats['image_remote_count'] = pool_data.get('image_remote_count', 0)

                if (stats.get('health_color', '') != 'error'
                        and pool_data.get('image_error_count', 0) > 0):
                    stats['health_color'] = 'error'
                    stats['health'] = 'Error'
                elif (stats.get('health_color', '') != 'error'
                      and pool_data.get('image_warning_count', 0) > 0):
                    stats['health_color'] = 'warning'
                    stats['health'] = 'Warning'
                elif stats.get('health', None) is None:
                    stats['health_color'] = 'success'
                    stats['health'] = 'OK'

        # Pools that are mirror-enabled but unreported by any daemon are
        # flagged after the merge.
        for _, stats in pool_stats.items():
            if stats['mirror_mode'] == 'disabled':
                continue
            if stats.get('health', None) is None:
                # daemon doesn't know about pool
                stats['health_color'] = 'error'
                stats['health'] = 'Error'
            elif stats.get('leader_id', None) is None:
                # no daemons are managing the pool as leader instance
                stats['health_color'] = 'warning'
                stats['health'] = 'Warning'
        return pool_stats

    daemons = get_daemons()
    return {
        'daemons': daemons,
        'pools': get_pools(daemons)
    }
190 | ||
191 | ||
@ViewCache()
def _get_pool_datum(pool_name):
    """Return mirror image status data for *pool_name*.

    Returns ``None`` when the pool's IOCtx cannot be opened; otherwise a
    dict with a sorted ``'mirror_images'`` list. Memoized by ``@ViewCache``.
    """
    data = {}
    logger.debug("Constructing IOCtx %s", pool_name)
    try:
        ioctx = mgr.rados.open_ioctx(pool_name)
    except TypeError:
        logger.exception("Failed to open pool %s", pool_name)
        return None

    # Map each librbd mirror image state (plus the synthetic 'down' key,
    # used when the image status is not 'up') to the dashboard's
    # health/state presentation fields.
    mirror_state = {
        'down': {
            'health': 'issue',
            'state_color': 'warning',
            'state': 'Unknown',
            'description': None
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_UNKNOWN: {
            'health': 'issue',
            'state_color': 'warning',
            'state': 'Unknown'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_ERROR: {
            'health': 'issue',
            'state_color': 'error',
            'state': 'Error'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_SYNCING: {
            'health': 'syncing'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Starting'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_REPLAYING: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Replaying'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Stopping'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STOPPED: {
            'health': 'ok',
            'state_color': 'info',
            'state': 'Primary'
        }
    }

    rbdctx = rbd.RBD()
    try:
        mirror_image_status = rbdctx.mirror_image_status_list(ioctx)
        data['mirror_images'] = sorted([
            dict({
                'name': image['name'],
                'description': image['description']
            }, **mirror_state['down' if not image['up'] else image['state']])
            for image in mirror_image_status
        ], key=lambda k: k['name'])
    except rbd.ImageNotFound:
        # no mirror images in this pool; leave data empty
        pass
    except:  # noqa pylint: disable=W0702
        logger.exception("Failed to list mirror image status %s", pool_name)
        raise

    return data
261 | ||
262 | ||
@ViewCache()
def _get_content_data():  # pylint: disable=R0914
    """Assemble the mirroring summary payload: daemons, per-pool stats and
    images bucketed by health (error / syncing / ready).

    Memoized by ``@ViewCache`` until explicitly reset.
    """
    # Hoisted out of the per-image loop (was re-compiled for every syncing
    # image): extracts the bootstrap copy progress percentage from the
    # image's status description.
    progress_re = re.compile("bootstrapping, IMAGE_COPY/COPY_OBJECT (.*)%")

    # Only replicated pools (type == 1) are considered for mirroring.
    pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
                  if pool.get('type', 1) == 1]
    _, data = get_daemons_and_pools()
    daemons = data.get('daemons', [])
    pool_stats = data.get('pools', {})

    pools = []
    image_error = []
    image_syncing = []
    image_ready = []
    for pool_name in pool_names:
        _, pool = _get_pool_datum(pool_name)
        if not pool:
            pool = {}

        stats = pool_stats.get(pool_name, {})
        if stats.get('mirror_mode', None) is None:
            # pool has no mirror stats (not mirror-enabled); skip it
            continue

        mirror_images = pool.get('mirror_images', [])
        for mirror_image in mirror_images:
            image = {
                'pool_name': pool_name,
                'name': mirror_image['name']
            }

            if mirror_image['health'] == 'ok':
                image.update({
                    'state_color': mirror_image['state_color'],
                    'state': mirror_image['state'],
                    'description': mirror_image['description']
                })
                image_ready.append(image)
            elif mirror_image['health'] == 'syncing':
                # default to 0 when no progress is present in the description
                image.update({
                    'progress': (progress_re.findall(mirror_image['description'])
                                 or [0])[0]
                })
                image_syncing.append(image)
            else:
                image.update({
                    'state_color': mirror_image['state_color'],
                    'state': mirror_image['state'],
                    'description': mirror_image['description']
                })
                image_error.append(image)

        pools.append(dict({
            'name': pool_name
        }, **stats))

    return {
        'daemons': daemons,
        'pools': pools,
        'image_error': image_error,
        'image_syncing': image_syncing,
        'image_ready': image_ready
    }
323 | ||
324 | ||
def _reset_view_cache():
    """Invalidate every ``@ViewCache``-backed helper in this module."""
    for cached in (get_daemons_and_pools, _get_pool_datum, _get_content_data):
        cached.reset()
329 | ||
330 | ||
@ApiController('/block/mirroring/summary', Scope.RBD_MIRRORING)
class RbdMirroringSummary(BaseController):
    """Read-only endpoint returning the cached mirroring summary."""

    @Endpoint()
    @handle_rbd_mirror_error()
    @ReadPermission
    def __call__(self):
        """Return the view-cache status alongside the summary payload."""
        vc_status, payload = _get_content_data()
        return {
            'status': vc_status,
            'content_data': payload
        }
340 | ||
341 | ||
@ApiController('/block/mirroring/pool', Scope.RBD_MIRRORING)
class RbdMirroringPoolMode(RESTController):
    """REST endpoint for reading and updating a pool's mirror mode."""

    RESOURCE_ID = "pool_name"
    # librbd mirror-mode enum -> string name exposed by the REST API
    MIRROR_MODES = {
        rbd.RBD_MIRROR_MODE_DISABLED: 'disabled',
        rbd.RBD_MIRROR_MODE_IMAGE: 'image',
        rbd.RBD_MIRROR_MODE_POOL: 'pool'
    }

    @handle_rbd_mirror_error()
    def get(self, pool_name):
        """Return ``{'mirror_mode': <name>}`` for the given pool."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        current_mode = rbd.RBD().mirror_mode_get(ioctx)
        return {
            'mirror_mode': self.MIRROR_MODES.get(current_mode, 'unknown')
        }

    @RbdMirroringTask('pool/edit', {'pool_name': '{pool_name}'}, 5.0)
    def set(self, pool_name, mirror_mode=None):
        """Set the pool's mirror mode; no-op when it already matches."""
        def _edit(ioctx, mirror_mode=None):
            if mirror_mode:
                # reverse lookup: mode name -> librbd enum value
                name_to_enum = {v: k for k, v in self.MIRROR_MODES.items()}
                mode_enum = name_to_enum.get(mirror_mode, None)
                if mode_enum is None:
                    raise rbd.Error('invalid mirror mode "{}"'.format(mirror_mode))

                current_mode_enum = rbd.RBD().mirror_mode_get(ioctx)
                if mode_enum != current_mode_enum:
                    rbd.RBD().mirror_mode_set(ioctx, mode_enum)
                _reset_view_cache()

        return _rbd_call(pool_name, _edit, mirror_mode)
376 | ||
377 | ||
@ApiController('/block/mirroring/pool/{pool_name}/peer', Scope.RBD_MIRRORING)
class RbdMirroringPoolPeer(RESTController):
    """CRUD endpoints for a pool's rbd-mirror peer clusters."""

    RESOURCE_ID = "peer_uuid"

    @handle_rbd_mirror_error()
    def list(self, pool_name):
        """Return the UUIDs of all mirror peers registered on the pool."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        peer_list = rbd.RBD().mirror_peer_list(ioctx)
        return [x['uuid'] for x in peer_list]

    @handle_rbd_mirror_error()
    def create(self, pool_name, cluster_name, client_id, mon_host=None,
               key=None):
        """Register a new mirror peer on the pool.

        Raises ``rbd.Error`` when mirroring is disabled on the pool.
        Optional ``mon_host``/``key`` are stored as peer attributes.
        Returns ``{'uuid': <new peer uuid>}``.
        """
        ioctx = mgr.rados.open_ioctx(pool_name)
        mode = rbd.RBD().mirror_mode_get(ioctx)
        if mode == rbd.RBD_MIRROR_MODE_DISABLED:
            raise rbd.Error('mirroring must be enabled')

        uuid = rbd.RBD().mirror_peer_add(ioctx, cluster_name,
                                         'client.{}'.format(client_id))

        attributes = {}
        if mon_host is not None:
            attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST] = mon_host
        if key is not None:
            attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY] = key
        if attributes:
            rbd.RBD().mirror_peer_set_attributes(ioctx, uuid, attributes)

        _reset_view_cache()
        return {'uuid': uuid}

    @handle_rbd_mirror_error()
    def get(self, pool_name, peer_uuid):
        """Return peer details (client_id, mon_host, key) or HTTP 404."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        peer_list = rbd.RBD().mirror_peer_list(ioctx)
        peer = next((x for x in peer_list if x['uuid'] == peer_uuid), None)
        if not peer:
            raise cherrypy.HTTPError(404)

        # convert full client name to just the client id
        peer['client_id'] = peer['client_name'].split('.', 1)[-1]
        del peer['client_name']

        try:
            attributes = rbd.RBD().mirror_peer_get_attributes(ioctx, peer_uuid)
        except rbd.ImageNotFound:
            # no attributes stored for this peer yet
            attributes = {}

        peer['mon_host'] = attributes.get(rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST, '')
        peer['key'] = attributes.get(rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY, '')
        return peer

    @handle_rbd_mirror_error()
    def delete(self, pool_name, peer_uuid):
        """Remove the peer from the pool and invalidate cached views."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        rbd.RBD().mirror_peer_remove(ioctx, peer_uuid)
        _reset_view_cache()

    @handle_rbd_mirror_error()
    def set(self, pool_name, peer_uuid, cluster_name=None, client_id=None,
            mon_host=None, key=None):
        """Update any subset of the peer's cluster name, client id and
        mon_host/key attributes; unspecified fields are left unchanged.
        """
        ioctx = mgr.rados.open_ioctx(pool_name)
        if cluster_name:
            rbd.RBD().mirror_peer_set_cluster(ioctx, peer_uuid, cluster_name)
        if client_id:
            rbd.RBD().mirror_peer_set_client(ioctx, peer_uuid,
                                             'client.{}'.format(client_id))

        if mon_host is not None or key is not None:
            try:
                # merge with existing attributes so the unspecified one persists
                attributes = rbd.RBD().mirror_peer_get_attributes(ioctx, peer_uuid)
            except rbd.ImageNotFound:
                attributes = {}

            if mon_host is not None:
                attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST] = mon_host
            if key is not None:
                attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY] = key
            rbd.RBD().mirror_peer_set_attributes(ioctx, peer_uuid, attributes)

        _reset_view_cache()