]>
Commit | Line | Data |
---|---|---|
11fdf7f2 | 1 | # -*- coding: utf-8 -*- |
11fdf7f2 | 2 | |
a4b75251 | 3 | import json |
9f95a23c | 4 | import logging |
f91f0fd5 TL |
5 | import os |
6 | from functools import partial | |
a4b75251 | 7 | from typing import Any, Dict, List, Optional |
11fdf7f2 | 8 | |
11fdf7f2 | 9 | import cephfs |
a4b75251 | 10 | from mgr_module import NFS_GANESHA_SUPPORTED_FSALS |
11fdf7f2 | 11 | |
a4b75251 | 12 | from .. import mgr |
11fdf7f2 TL |
13 | from ..security import Scope |
14 | from ..services.cephfs import CephFS | |
39ae355f TL |
15 | from ..services.exception import DashboardException, handle_cephfs_error, \ |
16 | serialize_dashboard_exception | |
a4b75251 TL |
17 | from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \ |
18 | ReadPermission, RESTController, Task, UIRouter | |
19 | from ._version import APIVersion | |
11fdf7f2 | 20 | |
a4b75251 TL |
21 | logger = logging.getLogger('controllers.nfs') |
22 | ||
23 | ||
class NFSException(DashboardException):
    """DashboardException pre-bound to the 'nfs' component.

    :param msg: human-readable error message surfaced to the dashboard.
    """

    def __init__(self, msg):
        # Zero-argument super() — this file is Python-3-only (it uses
        # f-strings), so the py2-compatible two-argument form is obsolete.
        super().__init__(component="nfs", msg=msg)
9f95a23c TL |
27 | |
28 | ||
11fdf7f2 TL |
# documentation helpers
# Response schema for an NFS-Ganesha export as returned by this controller.
# Each entry is (type, description) with an optional third element marking
# the field as optional.
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    # FSAL sub-document; also acts as a whitelist of the FSAL keys that are
    # safe to expose (see NFSGaneshaExports._get_schema_export).
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'user_id': (str, 'User id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
}
52 | ||
53 | ||
# Request schema for creating an export. Mirrors EXPORT_SCHEMA except that
# 'export_id' is assigned by the backend and 'fsal.user_id' is not accepted
# as input (mgr/nfs manages the user id itself).
CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
74 | ||
75 | ||
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory producing a dashboard Task named ``nfs/<name>``.

    :param name: short task name appended to the 'nfs/' namespace.
    :param metadata: template dict resolved from the wrapped call's arguments.
    :param wait_for: seconds the frontend waits before treating the task as
        long-running.
    :return: decorator wrapping *func* in a Task whose exceptions are
        serialized with their HTTP status included.
    """
    def composed_decorator(func):
        # f-string instead of str.format for consistency with the rest of
        # this module.
        return Task(f"nfs/{name}", metadata, wait_for,
                    partial(serialize_dashboard_exception,
                            include_http_status=True))(func)
    return composed_decorator
83 | ||
84 | ||
@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    # Read-only listing of NFS clusters, delegated to the 'nfs' mgr module.
    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        # Returns whatever 'nfs.cluster_ls' yields — presumably a list of
        # cluster ids; confirm against the mgr/nfs module.
        return mgr.remote('nfs', 'cluster_ls')
92 | ||
93 | ||
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """CRUD API for NFS-Ganesha exports, backed by the 'nfs' mgr module."""

    RESOURCE_ID = "cluster_id/export_id"

    @staticmethod
    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
        """
        Method that avoids returning export info not exposed in the export schema
        e.g., rgw user access/secret keys.
        """
        # Keep only the FSAL keys whitelisted in EXPORT_SCHEMA.
        schema_fsal_info = {}
        for key in export['fsal'].keys():
            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
                schema_fsal_info[key] = export['fsal'][key]
        export['fsal'] = schema_fsal_info
        return export

    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self) -> List[Dict[str, Any]]:
        """Return every export known to the 'nfs' mgr module, sanitized."""
        return [self._get_schema_export(export)
                for export in mgr.remote('nfs', 'export_ls')]

    @handle_cephfs_error()
    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def create(self, path, cluster_id, pseudo, access_type,
               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        """Create an export; raises if the pseudo path is already taken."""
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
                                     component='nfs')
        # BUG FIX: 'fsal' is a dict, so the former `hasattr(fsal, 'user_id')`
        # check could never be true and 'user_id' was silently forwarded.
        fsal.pop('user_id', None)  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if ret == 0:
            # Re-fetch so the response reflects backend-assigned fields
            # (e.g. export_id), then sanitize.
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Export creation failed {err}")

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (str, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
        """Return one sanitized export, or None if it does not exist."""
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if export:
            export = self._get_schema_export(export)

        return export

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def set(self, cluster_id, export_id, path, pseudo, access_type,
            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        """Apply a full replacement of an existing export's configuration."""
        # BUG FIX: same as in create() — 'fsal' is a dict, so hasattr() never
        # matched and a client-supplied 'user_id' leaked through.
        fsal.pop('user_id', None)  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'export_id': export_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }

        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if ret == 0:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Failed to update export: {err}")

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 })
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def delete(self, cluster_id, export_id):
        """Delete an export; 404 if the (cluster_id, export_id) pair is unknown."""
        export_id = int(export_id)

        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if not export:
            raise DashboardException(
                http_status_code=404,
                msg=f'Export with id {export_id} not found.',
                component='nfs')
        # Backend removal is keyed by pseudo path, not export id.
        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
11fdf7f2 TL |
219 | |
220 | ||
@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
    """UI-support endpoints for the NFS-Ganesha pages (not part of the REST API)."""

    @Endpoint('GET', '/fsals')
    @ReadPermission
    def fsals(self):
        # Static list of FSAL backends supported by the dashboard.
        return NFS_GANESHA_SUPPORTED_FSALS

    @Endpoint('GET', '/lsdir')
    @ReadPermission
    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
        """List directory paths of a CephFS subtree up to *depth* levels.

        :param fs_name: CephFS filesystem name.
        :param root_dir: starting directory; defaults to '/'. A missing
            leading slash is added and the path is normalized.
        :param depth: recursion depth, clamped to [0, 5].
        :raises DashboardException: 400 when depth is negative or not an int.
        """
        if root_dir is None:
            root_dir = "/"
        if not root_dir.startswith('/'):
            root_dir = '/{}'.format(root_dir)
        root_dir = os.path.normpath(root_dir)

        try:
            depth = int(depth)
            # error_msg is (re)set on every path through this try/except,
            # so the check in 'finally' is always well-defined.
            error_msg = ''
            if depth < 0:
                error_msg = '`depth` must be greater or equal to 0.'
            if depth > 5:
                logger.warning("Limiting depth to maximum value of 5: "
                               "input depth=%s", depth)
                depth = 5
        except ValueError:
            error_msg = '`depth` must be an integer.'
        finally:
            # Raising from 'finally' covers both the ValueError and the
            # negative-depth case with a single 400 response.
            if error_msg:
                raise DashboardException(code=400,
                                         component='nfs',
                                         msg=error_msg)

        try:
            cfs = CephFS(fs_name)
            paths = [root_dir]
            paths.extend([p['path'].rstrip('/')
                          for p in cfs.ls_dir(root_dir, depth)])
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            # Unknown directory or insufficient permission: report no paths
            # rather than failing the request.
            paths = []
        return {'paths': paths}

    @Endpoint('GET', '/cephfs/filesystems')
    @ReadPermission
    def filesystems(self):
        # Available CephFS filesystems (used to populate UI selectors).
        return CephFS.list_filesystems()

    @Endpoint()
    @ReadPermission
    def status(self):
        """Probe whether the 'nfs' mgr module is reachable.

        Returns {'available': bool, 'message': Optional[str]}; the message
        carries the failure reason when the module cannot be reached.
        """
        status = {'available': True, 'message': None}
        try:
            mgr.remote('nfs', 'cluster_ls')
        except (ImportError, RuntimeError) as error:
            logger.exception(error)
            status['available'] = False
            status['message'] = str(error)  # type: ignore

        return status