# Source: ceph.git -- ceph/src/pybind/mgr/dashboard/controllers/nfs.py (v19.2.0)
1 # -*- coding: utf-8 -*-
2
3 import json
4 import logging
5 import os
6 from functools import partial
7 from typing import Any, Dict, List, Optional
8
9 import cephfs
10 from mgr_module import NFS_GANESHA_SUPPORTED_FSALS
11
12 from .. import mgr
13 from ..security import Scope
14 from ..services.cephfs import CephFS
15 from ..services.exception import DashboardException, handle_cephfs_error, \
16 serialize_dashboard_exception
17 from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
18 ReadPermission, RESTController, Task, UIRouter
19 from ._version import APIVersion
20
21 logger = logging.getLogger('controllers.nfs')
22
23
class NFSException(DashboardException):
    """Dashboard exception tagged with the 'nfs' component.

    Raised when the mgr/nfs module reports a failure applying an export.
    """

    def __init__(self, msg):
        # Python 3 zero-argument super(); behavior identical to the
        # explicit two-argument form.
        super().__init__(component="nfs", msg=msg)
27
28
# documentation helpers
#
# Response schema for a single NFS export, consumed by @EndpointDoc.
# Each entry maps a field name to a ``(type, description)`` tuple; a
# trailing ``True`` element presumably marks the field as optional --
# confirm against the EndpointDoc conventions.
# NOTE: the 'fsal' sub-schema doubles as a whitelist in
# NFSGaneshaExports._get_schema_export, which drops any FSAL key not
# listed here (e.g. rgw access/secret keys).
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'user_id': (str, 'User id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
}
52
53
# Request schema for creating/updating an export, consumed by @EndpointDoc.
# Same shape as EXPORT_SCHEMA but without 'export_id' (assigned by mgr/nfs)
# and without 'fsal.user_id' (mgr/nfs does not let callers customize it).
CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
74
75
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory that runs the wrapped handler as a dashboard Task.

    The task is registered under ``nfs/<name>``; exceptions raised by the
    handler are serialized along with their HTTP status code.
    """
    def composed_decorator(func):
        exception_serializer = partial(serialize_dashboard_exception,
                                       include_http_status=True)
        task_name = "nfs/{}".format(name)
        return Task(task_name, metadata, wait_for, exception_serializer)(func)
    return composed_decorator
83
84
@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        """Return the NFS cluster list as reported by the mgr/nfs module."""
        cluster_ids = mgr.remote('nfs', 'cluster_ls')
        return cluster_ids
92
93
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """REST endpoints for managing NFS-Ganesha exports through mgr/nfs."""

    RESOURCE_ID = "cluster_id/export_id"

    @staticmethod
    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
        """
        Method that avoids returning export info not exposed in the export schema
        e.g., rgw user access/secret keys.
        """
        allowed_fsal_keys = EXPORT_SCHEMA['fsal'][0].keys()  # type: ignore
        export['fsal'] = {key: value
                          for key, value in export['fsal'].items()
                          if key in allowed_fsal_keys}
        return export

    @staticmethod
    def _drop_user_id(fsal: Dict[str, Any]) -> None:
        """Remove 'user_id' from the FSAL dict in place.

        mgr/nfs does not let you customize user_id. BUG FIX: the original
        used ``hasattr(fsal, 'user_id')``, which is always False for a dict
        (it checks attributes, not keys), so the key was never removed.
        """
        fsal.pop('user_id', None)

    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self) -> List[Dict[str, Any]]:
        """Return every export known to mgr/nfs, filtered to the schema."""
        return [self._get_schema_export(export)
                for export in mgr.remote('nfs', 'export_ls')]

    @handle_cephfs_error()
    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def create(self, path, cluster_id, pseudo, access_type,
               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        """Create a new export.

        :raises DashboardException: if the pseudo path is already in use.
        :raises NFSException: if mgr/nfs rejects the export definition.
        """
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
                                     component='nfs')
        self._drop_user_id(fsal)
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        applied_exports = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if not applied_exports.has_error:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Export creation failed {applied_exports.changes[0].msg}")

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (str, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
        """Return a single export, or None if it does not exist."""
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if export:
            export = self._get_schema_export(export)
        return export

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def set(self, cluster_id, export_id, path, pseudo, access_type,
            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        """Replace an existing export's definition.

        :raises NFSException: if mgr/nfs rejects the new definition.
        """
        self._drop_user_id(fsal)
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'export_id': export_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        applied_exports = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if not applied_exports.has_error:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        # BUG FIX: the original message said "creation" on the update path.
        raise NFSException(f"Export update failed {applied_exports.changes[0].msg}")

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 })
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def delete(self, cluster_id, export_id):
        """Delete an export by numeric id.

        :raises DashboardException: 404 if the export does not exist.
        """
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if not export:
            raise DashboardException(
                http_status_code=404,
                msg=f'Export with id {export_id} not found.',
                component='nfs')
        # mgr/nfs removes exports by pseudo path, not by numeric id.
        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
219
220
@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
    """Support endpoints used by the dashboard NFS UI (not part of the API)."""

    @Endpoint('GET', '/fsals')
    @ReadPermission
    def fsals(self):
        """Return the FSAL backends supported by mgr/nfs."""
        return NFS_GANESHA_SUPPORTED_FSALS

    @Endpoint('GET', '/lsdir')
    @ReadPermission
    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
        """List paths of a CephFS directory tree up to ``depth`` levels.

        :param fs_name: CephFS filesystem name.
        :param root_dir: starting directory; defaults to '/'.
        :param depth: recursion depth, clamped to a maximum of 5.
        :raises DashboardException: 400 if ``depth`` is not a non-negative
            integer.
        """
        if root_dir is None:
            root_dir = "/"
        if not root_dir.startswith('/'):
            root_dir = '/{}'.format(root_dir)
        root_dir = os.path.normpath(root_dir)

        error_msg = ''
        try:
            depth = int(depth)
            if depth < 0:
                error_msg = '`depth` must be greater or equal to 0.'
            elif depth > 5:
                logger.warning("Limiting depth to maximum value of 5: "
                               "input depth=%s", depth)
                depth = 5
        except (ValueError, TypeError):
            # BUG FIX: the original caught only ValueError and read
            # ``error_msg`` inside a ``finally`` block; a TypeError from
            # int() (e.g. depth=None) left ``error_msg`` unbound and the
            # real error was masked by a NameError.
            error_msg = '`depth` must be an integer.'
        if error_msg:
            raise DashboardException(code=400,
                                     component='nfs',
                                     msg=error_msg)

        try:
            cfs = CephFS(fs_name)
            paths = [root_dir]
            paths.extend([p['path'].rstrip('/')
                          for p in cfs.ls_dir(root_dir, depth)])
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            # A missing or inaccessible directory yields an empty listing
            # rather than an error.
            paths = []
        return {'paths': paths}

    @Endpoint('GET', '/cephfs/filesystems')
    @ReadPermission
    def filesystems(self):
        """Return the CephFS filesystems available on this cluster."""
        return CephFS.list_filesystems()

    @Endpoint()
    @ReadPermission
    def status(self):
        """Report whether the mgr/nfs module is reachable.

        Never raises: failures are reported via the 'available'/'message'
        fields so the UI can show a status banner.
        """
        status = {'available': True, 'message': None}
        try:
            mgr.remote('nfs', 'cluster_ls')
        except (ImportError, RuntimeError) as error:
            logger.exception(error)
            status['available'] = False
            status['message'] = str(error)  # type: ignore

        return status
279 return status