# -*- coding: utf-8 -*-
import json
import logging
import os
from collections import defaultdict
from typing import Any, Dict

import cephfs
import cherrypy

from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.cephfs import CephFS as CephFS_
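# The service-layer helper is aliased to CephFS_ so it does not clash with the
# CephFS controller class defined below.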
from ..services.exception import handle_cephfs_error
from ..tools import ViewCache, str_to_bool
from . import APIDoc, APIRouter, DeletePermission, Endpoint, EndpointDoc, \
    RESTController, UIRouter, UpdatePermission, allow_empty_body

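# Response schema used by @EndpointDoc on get_quota() below: each key maps to a
# (type, description) tuple describing a field of the GET /quota reply.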
GET_QUOTAS_SCHEMA = {
    'max_bytes': (int, ''),
    'max_files': (int, '')
}

logger = logging.getLogger("controllers.cephfs")


# pylint: disable=R0904
@APIRouter('/cephfs', Scope.CEPHFS)
@APIDoc("Cephfs Management API", "Cephfs")
class CephFS(RESTController):
    def __init__(self):  # pragma: no cover
        super().__init__()

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}

    def list(self):
        fsmap = mgr.get("fs_map")
        return fsmap['filesystems']

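    # create() translates the dashboard's service_spec into the placement string
    # understood by the volumes module. For example (illustrative payloads only),
    # {'placement': {'hosts': ['host1', 'host2']}} becomes '1 host1,host2' and
    # {'placement': {'labels': ['mds']}} becomes '1 label:mds'.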
    def create(self, name: str, service_spec: Dict[str, Any]):
        service_spec_str = '1 '
        if 'labels' in service_spec['placement']:
            for label in service_spec['placement']['labels']:
                service_spec_str += f'label:{label},'
            service_spec_str = service_spec_str[:-1]
        if 'hosts' in service_spec['placement']:
            for host in service_spec['placement']['hosts']:
                service_spec_str += f'{host},'
            service_spec_str = service_spec_str[:-1]

        error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_create', None,
                                        {'name': name, 'placement': service_spec_str})
        if error_code != 0:
            raise RuntimeError(
                f'Error creating volume {name} with placement {str(service_spec)}: {err}')
        return f'Volume {name} created successfully'

    @EndpointDoc("Remove CephFS Volume",
                 parameters={
                     'name': (str, 'File System Name'),
                 })
    @allow_empty_body
    @Endpoint('DELETE')
    @DeletePermission
    def remove(self, name):
        error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_rm', None,
                                        {'vol_name': name,
                                         'yes-i-really-mean-it': "--yes-i-really-mean-it"})
        if error_code != 0:
            raise DashboardException(
                msg=f'Error deleting volume {name}: {err}',
                component='cephfs')
        return f'Volume {name} removed successfully'

    @EndpointDoc("Rename CephFS Volume",
                 parameters={
                     'name': (str, 'Existing FS Name'),
                     'new_name': (str, 'New FS Name'),
                 })
    @allow_empty_body
    @UpdatePermission
    @Endpoint('PUT')
    def rename(self, name: str, new_name: str):
        error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_rename', None,
                                        {'vol_name': name, 'new_vol_name': new_name,
                                         'yes_i_really_mean_it': True})
        if error_code != 0:
            raise DashboardException(
                msg=f'Error renaming volume {name} to {new_name}: {err}',
                component='cephfs')
        return f'Volume {name} renamed successfully to {new_name}'

    def get(self, fs_id):
        fs_id = self.fs_id_to_int(fs_id)
        return self.fs_status(fs_id)

    @RESTController.Resource('GET')
    def clients(self, fs_id):
        fs_id = self.fs_id_to_int(fs_id)

        return self._clients(fs_id)

    @RESTController.Resource('DELETE', path='/client/{client_id}')
    def evict(self, fs_id, client_id):
        fs_id = self.fs_id_to_int(fs_id)
        client_id = self.client_id_to_int(client_id)

        return self._evict(fs_id, client_id)

    @RESTController.Resource('GET')
    def mds_counters(self, fs_id, counters=None):
        fs_id = self.fs_id_to_int(fs_id)
        return self._mds_counters(fs_id, counters)

    def _mds_counters(self, fs_id, counters=None):
        """
        Result format: map of daemon name to map of counter to list of datapoints
        rtype: dict[str, dict[str, list]]
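
        Illustrative example (daemon names and values are hypothetical):
        {'mds.a': {'mds_server.handle_client_request': [[<timestamp>, <value>], ...]}}
        where each datapoint is typically a [timestamp, value] pair as returned
        by mgr.get_counter().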
        """

        if counters is None:
            # Opinionated list of interesting performance counters for the GUI
            counters = [
                "mds_server.handle_client_request",
                "mds_log.ev",
                "mds_cache.num_strays",
                "mds.exported",
                "mds.exported_inodes",
                "mds.imported",
                "mds.imported_inodes",
                "mds.inodes",
                "mds.caps",
                "mds.subtrees",
                "mds_mem.ino"
            ]

        result: dict = {}
        mds_names = self._get_mds_names(fs_id)

        for mds_name in mds_names:
            result[mds_name] = {}
            for counter in counters:
                data = mgr.get_counter("mds", mds_name, counter)
                if data is not None:
                    result[mds_name][counter] = data[counter]
                else:
                    result[mds_name][counter] = []

        return dict(result)

    @staticmethod
    def fs_id_to_int(fs_id):
        try:
            return int(fs_id)
        except ValueError:
            raise DashboardException(code='invalid_cephfs_id',
                                     msg="Invalid cephfs ID {}".format(fs_id),
                                     component='cephfs')

    @staticmethod
    def client_id_to_int(client_id):
        try:
            return int(client_id)
        except ValueError:
            raise DashboardException(code='invalid_cephfs_client_id',
                                     msg="Invalid cephfs client ID {}".format(client_id),
                                     component='cephfs')

    def _get_mds_names(self, filesystem_id=None):
        names = []

        fsmap = mgr.get("fs_map")
        for fs in fsmap['filesystems']:
            if filesystem_id is not None and fs['id'] != filesystem_id:
                continue
            names.extend([info['name']
                          for _, info in fs['mdsmap']['info'].items()])

        if filesystem_id is None:
            names.extend(info['name'] for info in fsmap['standbys'])

        return names

    def _append_mds_metadata(self, mds_versions, metadata_key):
        metadata = mgr.get_metadata('mds', metadata_key)
        if metadata is None:
            return
        mds_versions[metadata.get('ceph_version', 'unknown')].append(metadata_key)

    def _find_standby_replays(self, mdsmap_info, rank_table):
        # pylint: disable=unused-variable
        for gid_str, daemon_info in mdsmap_info.items():
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dn")
            dirs = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dir")
            caps = mgr.get_latest("mds", daemon_info['name'], "mds_mem.cap")

            activity = CephService.get_rate(
                "mds", daemon_info['name'], "mds_log.replay")

            rank_table.append(
                {
                    "rank": "{0}-s".format(daemon_info['rank']),
                    "state": "standby-replay",
                    "mds": daemon_info['name'],
                    "activity": activity,
                    "dns": dns,
                    "inos": inos,
                    "dirs": dirs,
                    "caps": caps
                }
            )

    def get_standby_table(self, standbys, mds_versions):
        standby_table = []
        for standby in standbys:
            self._append_mds_metadata(mds_versions, standby['name'])
            standby_table.append({
                'name': standby['name']
            })
        return standby_table

    # pylint: disable=too-many-statements,too-many-branches
    def fs_status(self, fs_id):
        mds_versions: dict = defaultdict(list)

        fsmap = mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []

        mdsmap = filesystem['mdsmap']

        client_count = 0

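        # The mdsmap lists active ranks in "in", maps each rank to a GID via
        # "up" (keyed "mds_<rank>") and keeps per-daemon details in "info"
        # (keyed "gid_<gid>"); the lookups below rely on that layout.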
        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = mgr.get_latest("mds", info['name'], "mds_mem.dn")
                inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")
                dirs = mgr.get_latest("mds", info['name'], "mds_mem.dir")
                caps = mgr.get_latest("mds", info['name'], "mds_mem.cap")

                # In case rank 0 was down, look at another rank's
                # sessionmap to get an indication of clients.
                if rank == 0 or client_count == 0:
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                if state == "active":
                    activity = CephService.get_rate("mds",
                                                    info['name'],
                                                    "mds_server.handle_client_request")
                else:
                    activity = 0.0  # pragma: no cover

                self._append_mds_metadata(mds_versions, info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos,
                        "dirs": dirs,
                        "caps": caps
                    }
                )

            else:
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": 0.0,
                        "dns": 0,
                        "inos": 0,
                        "dirs": 0,
                        "caps": 0
                    }
                )

        self._find_standby_replays(mdsmap['info'], rank_table)

        df = mgr.get("df")
        pool_stats = {p['id']: p['stats'] for p in df['pools']}
        osdmap = mgr.get("osd_map")
        pools = {p['pool']: p for p in osdmap['pools']}
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['stored'],
                "avail": stats['max_avail']
            })

        standby_table = self.get_standby_table(fsmap['standbys'], mds_versions)

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }

    def _clients(self, fs_id):
        cephfs_clients = self.cephfs_clients.get(fs_id, None)
        if cephfs_clients is None:
            cephfs_clients = CephFSClients(mgr, fs_id)
            self.cephfs_clients[fs_id] = cephfs_clients

        try:
            status, clients = cephfs_clients.get()
        except AttributeError:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        if clients is None:
            raise cherrypy.HTTPError(404,
                                     "No cephfs with id {0}".format(fs_id))

        # Decorate the metadata with some fields that will be
        # independent of whether it's a kernel or userspace
        # client, so that the javascript doesn't have to grok that.
        for client in clients:
            if "ceph_version" in client['client_metadata']:  # pragma: no cover - no complexity
                client['type'] = "userspace"
                client['version'] = client['client_metadata']['ceph_version']
                client['hostname'] = client['client_metadata']['hostname']
                client['root'] = client['client_metadata']['root']
            elif "kernel_version" in client['client_metadata']:  # pragma: no cover - no complexity
                client['type'] = "kernel"
                client['version'] = client['client_metadata']['kernel_version']
                client['hostname'] = client['client_metadata']['hostname']
                client['root'] = client['client_metadata']['root']
            else:  # pragma: no cover - no complexity there
                client['type'] = "unknown"
                client['version'] = ""
                client['hostname'] = ""

        return {
            'status': status,
            'data': clients
        }

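    # _evict sends 'client evict' to rank 0 of the filesystem (srv_spec
    # '<fscid>:0') and selects the session with an 'id=<client_id>' filter,
    # roughly the equivalent of `ceph tell mds.<name> client evict id=<id>`.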
    def _evict(self, fs_id, client_id):
        clients = self._clients(fs_id)
        if not [c for c in clients['data'] if c['id'] == client_id]:
            raise cherrypy.HTTPError(404,
                                     "Client {0} does not exist in cephfs {1}".format(client_id,
                                                                                      fs_id))
        filters = [f'id={client_id}']
        CephService.send_command('mds', 'client evict',
                                 srv_spec='{0}:0'.format(fs_id), filters=filters)

    @staticmethod
    def _cephfs_instance(fs_id):
        """
        :param fs_id: The filesystem identifier.
        :type fs_id: int | str
        :return: An instance of the CephFS class.
        """
        fs_name = CephFS_.fs_name_from_id(fs_id)
        if fs_name is None:
            raise cherrypy.HTTPError(404, "CephFS id {} not found".format(fs_id))
        return CephFS_(fs_name)

    @RESTController.Resource('GET')
    def get_root_directory(self, fs_id):
        """
        Get the root directory, which cannot be fetched using ls_dir (api).
        :param fs_id: The filesystem identifier.
        :return: The root directory
        :rtype: dict
        """
        try:
            return self._get_root_directory(self._cephfs_instance(fs_id))
        except (cephfs.PermissionError, cephfs.ObjectNotFound):  # pragma: no cover
            return None

    def _get_root_directory(self, cfs):
        """
        Get the root directory, which cannot be fetched using ls_dir (api).
        It's used in ls_dir (ui-api) and in get_root_directory (api).
        :param cfs: CephFS service instance
        :type cfs: CephFS
        :return: The root directory
        :rtype: dict
        """
        return cfs.get_directory(os.sep.encode())

    @handle_cephfs_error()
    @RESTController.Resource('GET')
    def ls_dir(self, fs_id, path=None, depth=1):
        """
        List directories of specified path.
        :param fs_id: The filesystem identifier.
        :param path: The path where to start listing the directory content.
            Defaults to '/' if not set.
        :type path: str | bytes
        :param depth: The number of steps to go down the directory tree.
        :type depth: int | str
        :return: The names of the directories below the specified path.
        :rtype: list
        """
        path = self._set_ls_dir_path(path)
        try:
            cfs = self._cephfs_instance(fs_id)
            paths = cfs.ls_dir(path, depth)
        except (cephfs.PermissionError, cephfs.ObjectNotFound):  # pragma: no cover
            paths = []
        return paths

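    # _set_ls_dir_path falls back to '/' when no path is given and otherwise
    # normalizes the input with os.path.normpath, e.g. an illustrative
    # 'a//b/./c' becomes 'a/b/c'.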
    def _set_ls_dir_path(self, path):
        """
        Transforms input path parameter of ls_dir methods (api and ui-api).
        :param path: The path where to start listing the directory content.
            Defaults to '/' if not set.
        :type path: str | bytes
        :return: Normalized path or root path
        :rtype: str
        """
        if path is None:
            path = os.sep
        else:
            path = os.path.normpath(path)
        return path

    @RESTController.Resource('POST', path='/tree')
    @allow_empty_body
    def mk_tree(self, fs_id, path):
        """
        Create a directory.
        :param fs_id: The filesystem identifier.
        :param path: The path of the directory.
        """
        cfs = self._cephfs_instance(fs_id)
        cfs.mk_dirs(path)

    @RESTController.Resource('DELETE', path='/tree')
    def rm_tree(self, fs_id, path):
        """
        Remove a directory.
        :param fs_id: The filesystem identifier.
        :param path: The path of the directory.
        """
        cfs = self._cephfs_instance(fs_id)
        cfs.rm_dir(path)

    @RESTController.Resource('PUT', path='/quota')
    @allow_empty_body
    def quota(self, fs_id, path, max_bytes=None, max_files=None):
        """
        Set the quotas of the specified path.
        :param fs_id: The filesystem identifier.
        :param path: The path of the directory/file.
        :param max_bytes: The byte limit.
        :param max_files: The file limit.
        """
        cfs = self._cephfs_instance(fs_id)
        return cfs.set_quotas(path, max_bytes, max_files)

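    # A GET on /quota returns a dict shaped like GET_QUOTAS_SCHEMA, e.g. the
    # illustrative reply {'max_bytes': 0, 'max_files': 0}; in CephFS a quota
    # value of 0 conventionally means that no limit is set.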
    @RESTController.Resource('GET', path='/quota')
    @EndpointDoc("Get Cephfs Quotas of the specified path",
                 parameters={
                     'fs_id': (str, 'File System Identifier'),
                     'path': (str, 'File System Path'),
                 },
                 responses={200: GET_QUOTAS_SCHEMA})
    def get_quota(self, fs_id, path):
        """
        Get the quotas of the specified path.
        :param fs_id: The filesystem identifier.
        :param path: The path of the directory/file.
        :return: Returns a dictionary containing 'max_bytes'
            and 'max_files'.
        :rtype: dict
        """
        cfs = self._cephfs_instance(fs_id)
        return cfs.get_quotas(path)

    @RESTController.Resource('POST', path='/snapshot')
    @allow_empty_body
    def snapshot(self, fs_id, path, name=None):
        """
        Create a snapshot.
        :param fs_id: The filesystem identifier.
        :param path: The path of the directory.
        :param name: The name of the snapshot. If not specified, a name using the
            current time in RFC3339 UTC format will be generated.
        :return: The name of the snapshot.
        :rtype: str
        """
        cfs = self._cephfs_instance(fs_id)
        list_snaps = cfs.ls_snapshots(path)
        for snap in list_snaps:
            if name == snap['name']:
                raise DashboardException(code='snapshot_name_in_use',
                                         msg='Snapshot name {} is already in use. '
                                             'Please use another name'.format(name),
                                         component='cephfs')

        return cfs.mk_snapshot(path, name)

    @RESTController.Resource('DELETE', path='/snapshot')
    def rm_snapshot(self, fs_id, path, name):
        """
        Remove a snapshot.
        :param fs_id: The filesystem identifier.
        :param path: The path of the directory.
        :param name: The name of the snapshot.
        """
        cfs = self._cephfs_instance(fs_id)
        cfs.rm_snapshot(path, name)


class CephFSClients(object):
    def __init__(self, module_inst, fscid):
        self._module = module_inst
        self.fscid = fscid

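    # get() is wrapped in @ViewCache, so callers receive a (status, value)
    # tuple rather than the raw 'session ls' output; CephFS._clients() above
    # unpacks it accordingly.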
    @ViewCache()
    def get(self):
        return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))


@UIRouter('/cephfs', Scope.CEPHFS)
@APIDoc("Dashboard UI helper function; not part of the public API", "CephFSUi")
class CephFsUi(CephFS):
    RESOURCE_ID = 'fs_id'

    @RESTController.Resource('GET')
    def tabs(self, fs_id):
        data = {}
        fs_id = self.fs_id_to_int(fs_id)

        # Needed for detail tab
        fs_status = self.fs_status(fs_id)
        for pool in fs_status['cephfs']['pools']:
            pool['size'] = pool['used'] + pool['avail']
        data['pools'] = fs_status['cephfs']['pools']
        data['ranks'] = fs_status['cephfs']['ranks']
        data['name'] = fs_status['cephfs']['name']
        data['standbys'] = ', '.join([x['name'] for x in fs_status['standbys']])
        counters = self._mds_counters(fs_id)
        for k, v in counters.items():
            v['name'] = k
        data['mds_counters'] = counters

        # Needed for client tab
        data['clients'] = self._clients(fs_id)

        return data

    @handle_cephfs_error()
    @RESTController.Resource('GET')
    def ls_dir(self, fs_id, path=None, depth=1):
        """
        The difference to the API version is that the root directory will be sent
        when listing the root directory. This endpoint exists so that only a single
        request is needed.
        :param fs_id: The filesystem identifier.
        :type fs_id: int | str
        :param path: The path where to start listing the directory content.
            Defaults to '/' if not set.
        :type path: str | bytes
        :param depth: The number of steps to go down the directory tree.
        :type depth: int | str
        :return: The names of the directories below the specified path.
        :rtype: list
        """
        path = self._set_ls_dir_path(path)
        try:
            cfs = self._cephfs_instance(fs_id)
            paths = cfs.ls_dir(path, depth)
            if path == os.sep:
                paths = [self._get_root_directory(cfs)] + paths
        except (cephfs.PermissionError, cephfs.ObjectNotFound):  # pragma: no cover
            paths = []
        return paths
620 | |
621 | ||
622 | @APIRouter('/cephfs/subvolume', Scope.CEPHFS) | |
623 | @APIDoc('CephFS Subvolume Management API', 'CephFSSubvolume') | |
624 | class CephFSSubvolume(RESTController): | |
625 | ||
626 | def get(self, vol_name: str, group_name: str = ""): | |
627 | params = {'vol_name': vol_name} | |
628 | if group_name: | |
629 | params['group_name'] = group_name | |
630 | error_code, out, err = mgr.remote( | |
631 | 'volumes', '_cmd_fs_subvolume_ls', None, params) | |
632 | if error_code != 0: | |
633 | raise DashboardException( | |
634 | f'Failed to list subvolumes for volume {vol_name}: {err}' | |
635 | ) | |
636 | subvolumes = json.loads(out) | |
637 | for subvolume in subvolumes: | |
638 | params['sub_name'] = subvolume['name'] | |
639 | error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None, | |
640 | params) | |
641 | if error_code != 0: | |
642 | raise DashboardException( | |
643 | f'Failed to get info for subvolume {subvolume["name"]}: {err}' | |
644 | ) | |
645 | subvolume['info'] = json.loads(out) | |
646 | return subvolumes | |

    @RESTController.Resource('GET')
    def info(self, vol_name: str, subvol_name: str, group_name: str = ""):
        params = {'vol_name': vol_name, 'sub_name': subvol_name}
        if group_name:
            params['group_name'] = group_name
        error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None,
                                          params)
        if error_code != 0:
            raise DashboardException(
                f'Failed to get info for subvolume {subvol_name}: {err}'
            )
        return json.loads(out)

    def create(self, vol_name: str, subvol_name: str, **kwargs):
        error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_create', None, {
            'vol_name': vol_name, 'sub_name': subvol_name, **kwargs})
        if error_code != 0:
            raise DashboardException(
                f'Failed to create subvolume {subvol_name}: {err}'
            )

        return f'Subvolume {subvol_name} created successfully'

    def set(self, vol_name: str, subvol_name: str, size: str, group_name: str = ""):
        params = {'vol_name': vol_name, 'sub_name': subvol_name}
        if size:
            params['new_size'] = size
        if group_name:
            params['group_name'] = group_name
        error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_resize', None,
                                        params)
        if error_code != 0:
            raise DashboardException(
                f'Failed to update subvolume {subvol_name}: {err}'
            )

        return f'Subvolume {subvol_name} updated successfully'

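    # delete() receives retain_snapshots as a query-string value, so it is
    # normalized with str_to_bool and forwarded to the volumes module as the
    # string 'True' when set.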
    def delete(self, vol_name: str, subvol_name: str, group_name: str = "",
               retain_snapshots: bool = False):
        params = {'vol_name': vol_name, 'sub_name': subvol_name}
        if group_name:
            params['group_name'] = group_name
        retain_snapshots = str_to_bool(retain_snapshots)
        if retain_snapshots:
            params['retain_snapshots'] = 'True'
        error_code, _, err = mgr.remote(
            'volumes', '_cmd_fs_subvolume_rm', None, params)
        if error_code != 0:
            raise DashboardException(
                msg=f'Failed to remove subvolume {subvol_name}: {err}',
                component='cephfs')
        return f'Subvolume {subvol_name} removed successfully'


@APIRouter('/cephfs/subvolume/group', Scope.CEPHFS)
@APIDoc("Cephfs Subvolume Group Management API", "CephfsSubvolumeGroup")
class CephFSSubvolumeGroups(RESTController):

    def get(self, vol_name):
        if not vol_name:
            raise DashboardException(
                f'Error listing subvolume groups for {vol_name}')
        error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_ls',
                                          None, {'vol_name': vol_name})
        if error_code != 0:
            raise DashboardException(
                f'Error listing subvolume groups for {vol_name}')
        subvolume_groups = json.loads(out)
        for group in subvolume_groups:
            error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info',
                                              None, {'vol_name': vol_name,
                                                     'group_name': group['name']})
            if error_code != 0:
                raise DashboardException(
                    f'Failed to get info for subvolume group {group["name"]}: {err}'
                )
            group['info'] = json.loads(out)
        return subvolume_groups

    @RESTController.Resource('GET')
    def info(self, vol_name: str, group_name: str):
        error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info', None, {
            'vol_name': vol_name, 'group_name': group_name})
        if error_code != 0:
            raise DashboardException(
                f'Failed to get info for subvolume group {group_name}: {err}'
            )
        return json.loads(out)

    def create(self, vol_name: str, group_name: str, **kwargs):
        error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_create', None, {
            'vol_name': vol_name, 'group_name': group_name, **kwargs})
        if error_code != 0:
            raise DashboardException(
                f'Failed to create subvolume group {group_name}: {err}'
            )

    def set(self, vol_name: str, group_name: str, size: str):
        if not size:
            return f'Failed to update subvolume group {group_name}, size was not provided'
        error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_resize', None, {
            'vol_name': vol_name, 'group_name': group_name, 'new_size': size})
        if error_code != 0:
            raise DashboardException(
                f'Failed to update subvolume group {group_name}: {err}'
            )
        return f'Subvolume group {group_name} updated successfully'

    def delete(self, vol_name: str, group_name: str):
        error_code, _, err = mgr.remote(
            'volumes', '_cmd_fs_subvolumegroup_rm', None, {
                'vol_name': vol_name, 'group_name': group_name})
        if error_code != 0:
            raise DashboardException(
                f'Failed to delete subvolume group {group_name}: {err}'
            )
        return f'Subvolume group {group_name} removed successfully'