]>
Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | # -*- coding: utf-8 -*- |
2 | from __future__ import absolute_import | |
3 | ||
9f95a23c | 4 | import logging |
f91f0fd5 TL |
5 | import os |
6 | from functools import partial | |
11fdf7f2 TL |
7 | |
8 | import cherrypy | |
9 | import cephfs | |
10 | ||
11 | from . import ApiController, RESTController, UiApiController, BaseController, \ | |
12 | Endpoint, Task, ReadPermission, ControllerDoc, EndpointDoc | |
11fdf7f2 TL |
13 | from ..security import Scope |
14 | from ..services.cephfs import CephFS | |
15 | from ..services.cephx import CephX | |
f91f0fd5 | 16 | from ..services.exception import DashboardException, serialize_dashboard_exception |
11fdf7f2 TL |
17 | from ..services.ganesha import Ganesha, GaneshaConf, NFSException |
18 | from ..services.rgw_client import RgwClient | |
19 | ||
20 | ||
logger = logging.getLogger('controllers.ganesha')


# documentation helpers
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
    'pseudo': (str, 'Pseudo FS path'),
    'tag': (str, 'NFSv3 export tag'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'user_id': (str, 'CephX user id', True),
        'filesystem': (str, 'CephFS filesystem ID', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'rgw_user_id': (str, 'RGW user id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
}


# The create/update payload schema is the export schema minus the
# server-assigned 'export_id', plus the 'reload_daemons' flag.  Derive it
# from EXPORT_SCHEMA instead of duplicating the whole literal, so the two
# schemas cannot drift apart when fields are added or reworded.
CREATE_EXPORT_SCHEMA = {
    key: value
    for key, value in EXPORT_SCHEMA.items()
    if key != 'export_id'
}
CREATE_EXPORT_SCHEMA['reload_daemons'] = (
    bool,
    'Trigger reload of NFS-Ganesha daemons configuration',
    True)
80 | ||
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory wrapping an endpoint in an ``nfs/<name>`` task.

    Any dashboard exception raised by the wrapped endpoint is serialized
    into the task result together with its HTTP status code.
    """
    exception_handler = partial(serialize_dashboard_exception,
                                include_http_status=True)

    def composed_decorator(func):
        task = Task("nfs/{}".format(name), metadata, wait_for,
                    exception_handler)
        return task(func)
    return composed_decorator
88 | ||
89 | ||
@ApiController('/nfs-ganesha', Scope.NFS_GANESHA)
@ControllerDoc("NFS-Ganesha Management API", "NFS-Ganesha")
class NFSGanesha(RESTController):
    """Top-level status endpoint for the NFS-Ganesha management feature."""

    @EndpointDoc("Status of NFS-Ganesha management feature",
                 responses={200: {
                     'available': (bool, "Is API available?"),
                     'message': (str, "Error message")
                 }})
    @Endpoint()
    @ReadPermission
    def status(self):
        """Probe the Ganesha configuration and report availability.

        An NFSException while listing clusters means the feature cannot
        be used; its message is surfaced to the caller.
        """
        try:
            Ganesha.get_ganesha_clusters()
        except NFSException as e:
            return {'available': False, 'message': str(e)}
        return {'available': True, 'message': None}
110 | ||
111 | ||
@ApiController('/nfs-ganesha/export', Scope.NFS_GANESHA)
@ControllerDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """CRUD endpoints for NFS-Ganesha exports across all clusters."""

    RESOURCE_ID = "cluster_id/export_id"

    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self):
        """Return the exports of every known Ganesha cluster as dicts."""
        return [
            export.to_dict()
            for cluster_id in Ganesha.get_ganesha_clusters()
            for export in GaneshaConf.instance(cluster_id).list_exports()
        ]

    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    def create(self, path, cluster_id, daemons, pseudo, tag, access_type,
               squash, security_label, protocols, transports, fsal, clients,
               reload_daemons=True):
        """Create an export, optionally reloading the affected daemons."""
        if fsal['name'] not in Ganesha.fsals_available():
            raise NFSException("Cannot create this export. "
                               "FSAL '{}' cannot be managed by the dashboard."
                               .format(fsal['name']))

        conf = GaneshaConf.instance(cluster_id)
        new_export_id = conf.create_export(dict(
            path=path,
            pseudo=pseudo,
            cluster_id=cluster_id,
            daemons=daemons,
            tag=tag,
            access_type=access_type,
            squash=squash,
            security_label=security_label,
            protocols=protocols,
            transports=transports,
            fsal=fsal,
            clients=clients,
        ))
        if reload_daemons:
            conf.reload_daemons(daemons)
        return conf.get_export(new_export_id).to_dict()

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id):
        """Return a single export, or 404 if it does not exist."""
        export_id = int(export_id)
        conf = GaneshaConf.instance(cluster_id)
        if not conf.has_export(export_id):
            raise cherrypy.HTTPError(404)
        return conf.get_export(export_id).to_dict()

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    def set(self, cluster_id, export_id, path, daemons, pseudo, tag, access_type,
            squash, security_label, protocols, transports, fsal, clients,
            reload_daemons=True):
        """Update an export, reloading both its old and new daemon sets."""
        export_id = int(export_id)
        conf = GaneshaConf.instance(cluster_id)

        if not conf.has_export(export_id):
            raise cherrypy.HTTPError(404)  # pragma: no cover - the handling is too obvious

        if fsal['name'] not in Ganesha.fsals_available():
            raise NFSException("Cannot make modifications to this export. "
                               "FSAL '{}' cannot be managed by the dashboard."
                               .format(fsal['name']))

        old_export = conf.update_export(dict(
            export_id=export_id,
            path=path,
            cluster_id=cluster_id,
            daemons=daemons,
            pseudo=pseudo,
            tag=tag,
            access_type=access_type,
            squash=squash,
            security_label=security_label,
            protocols=protocols,
            transports=transports,
            fsal=fsal,
            clients=clients,
        ))
        # Daemons dropped from the export still hold the old configuration
        # and must be reloaded together with the new daemon set.
        daemons = list(daemons)
        for daemon_id in old_export.daemons:
            if daemon_id not in daemons:
                daemons.append(daemon_id)
        if reload_daemons:
            conf.reload_daemons(daemons)
        return conf.get_export(export_id).to_dict()

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID"),
                     'reload_daemons': (bool,
                                        'Trigger reload of NFS-Ganesha daemons'
                                        ' configuration',
                                        True)
                 })
    def delete(self, cluster_id, export_id, reload_daemons=True):
        """Remove an export, optionally reloading its daemons."""
        export_id = int(export_id)
        conf = GaneshaConf.instance(cluster_id)

        if not conf.has_export(export_id):
            raise cherrypy.HTTPError(404)  # pragma: no cover - the handling is too obvious
        removed = conf.remove_export(export_id)
        if reload_daemons:
            conf.reload_daemons(removed.daemons)
235 | ||
236 | ||
@ApiController('/nfs-ganesha/daemon', Scope.NFS_GANESHA)
@ControllerDoc(group="NFS-Ganesha")
class NFSGaneshaService(RESTController):
    """Read-only endpoint exposing NFS-Ganesha daemon information."""

    @EndpointDoc("List NFS-Ganesha daemons information",
                 responses={200: [{
                     'daemon_id': (str, 'Daemon identifier'),
                     'cluster_id': (str, 'Cluster identifier'),
                     'cluster_type': (str, 'Cluster type'),
                     'status': (int, 'Status of daemon', True),
                     'desc': (str, 'Status description', True)
                 }]})
    def list(self):
        """Collect the daemons of every known Ganesha cluster."""
        daemons = []
        for cluster_id in Ganesha.get_ganesha_clusters():
            daemons += GaneshaConf.instance(cluster_id).list_daemons()
        return daemons
254 | ||
255 | ||
@UiApiController('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
    """UI-only helper endpoints backing the NFS-Ganesha dashboard forms."""

    @Endpoint('GET', '/cephx/clients')
    @ReadPermission
    def cephx_clients(self):
        """Return the available CephX client IDs."""
        return list(CephX.list_clients())

    @Endpoint('GET', '/fsals')
    @ReadPermission
    def fsals(self):
        """Return the FSALs that the dashboard can manage."""
        return Ganesha.fsals_available()

    @Endpoint('GET', '/lsdir')
    @ReadPermission
    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
        """List directory paths of a CephFS filesystem.

        :param fs_name: name of the CephFS filesystem to browse.
        :param root_dir: directory to start from; defaults to "/".
        :param depth: recursion depth (0-5); values above 5 are clamped to 5.
        :raises DashboardException: (400) if ``depth`` is not a non-negative
            integer.
        :return: dict with a 'paths' list; empty when ``root_dir`` cannot be
            read (not found or permission denied).
        """
        if root_dir is None:
            root_dir = "/"
        elif not root_dir.startswith('/'):
            root_dir = '/{}'.format(root_dir)
        root_dir = os.path.normpath(root_dir)

        # Validate 'depth' before using it.  The previous version only
        # assigned 'error_msg' inside the try block, so any non-ValueError
        # failure of int() (e.g. TypeError on a non-string, non-number
        # argument) raised a masking NameError in the finally clause.
        error_msg = ''
        try:
            depth = int(depth)
            if depth < 0:
                error_msg = '`depth` must be greater or equal to 0.'
            elif depth > 5:
                logger.warning("Limiting depth to maximum value of 5: "
                               "input depth=%s", depth)
                depth = 5
        except (TypeError, ValueError):
            error_msg = '`depth` must be an integer.'
        if error_msg:
            raise DashboardException(code=400,
                                     component='nfsganesha',
                                     msg=error_msg)

        try:
            cfs = CephFS(fs_name)
            paths = [root_dir]
            # ls_dir entries carry trailing slashes; strip for the UI.
            paths.extend(p['path'].rstrip('/')
                         for p in cfs.ls_dir(root_dir, depth))
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            paths = []
        return {'paths': paths}

    @Endpoint('GET', '/cephfs/filesystems')
    @ReadPermission
    def filesystems(self):
        """Return the list of CephFS filesystems."""
        return CephFS.list_filesystems()

    @Endpoint('GET', '/rgw/buckets')
    @ReadPermission
    def buckets(self, user_id=None):
        """Return the buckets of the given (or default) RGW user."""
        return RgwClient.instance(user_id).get_buckets()

    @Endpoint('GET', '/clusters')
    @ReadPermission
    def clusters(self):
        """Return the configured NFS-Ganesha cluster IDs."""
        return Ganesha.get_ganesha_clusters()