]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/dashboard/controllers/nfs.py
3c177511c1da9f9769c05d4147ef2f0f98eedb5f
[ceph.git] / ceph / src / pybind / mgr / dashboard / controllers / nfs.py
1 # -*- coding: utf-8 -*-
2
3 import json
4 import logging
5 import os
6 from functools import partial
7 from typing import Any, Dict, List, Optional
8
9 import cephfs
10 from mgr_module import NFS_GANESHA_SUPPORTED_FSALS
11
12 from .. import mgr
13 from ..security import Scope
14 from ..services.cephfs import CephFS
15 from ..services.exception import DashboardException, serialize_dashboard_exception
16 from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
17 ReadPermission, RESTController, Task, UIRouter
18 from ._version import APIVersion
19
20 logger = logging.getLogger('controllers.nfs')
21
22
class NFSException(DashboardException):
    """Dashboard exception scoped to the NFS component."""

    def __init__(self, msg):
        # Zero-argument super() — the file is Python-3-only (f-strings are
        # used below), so the legacy two-argument form is unnecessary.
        super().__init__(component="nfs", msg=msg)
26
27
# documentation helpers
#
# Each entry maps a field name to ``(type, description)`` or
# ``(type, description, True)`` where the trailing ``True`` marks the field
# as optional; nested dicts/lists describe structured sub-objects. This is
# the tuple convention consumed by the @EndpointDoc decorator.
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'user_id': (str, 'User id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
}
51
52
# Request schema for export creation. Deliberately differs from
# EXPORT_SCHEMA: no 'export_id' (assigned by mgr/nfs) and no
# 'fsal.user_id' (not user-configurable; see create()/set() below).
CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
73
74
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory that registers *func* as a dashboard Task.

    The task is named ``nfs/<name>``; failures are serialized via
    serialize_dashboard_exception with the HTTP status included.
    """
    exception_serializer = partial(serialize_dashboard_exception,
                                   include_http_status=True)

    def composed_decorator(func):
        task_name = "nfs/{}".format(name)
        return Task(task_name, metadata, wait_for, exception_serializer)(func)

    return composed_decorator
82
83
@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    """REST endpoint listing NFS-Ganesha clusters known to the mgr/nfs module."""

    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        # Thin delegation to mgr/nfs 'cluster ls'; returns whatever the
        # remote call yields (presumably a list of cluster ids).
        return mgr.remote('nfs', 'cluster_ls')
91
92
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """REST CRUD API for NFS-Ganesha exports, backed by the mgr/nfs module."""

    RESOURCE_ID = "cluster_id/export_id"

    @staticmethod
    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
        """
        Method that avoids returning export info not exposed in the export schema
        e.g., rgw user access/secret keys.
        """
        # Keep only the FSAL keys declared in EXPORT_SCHEMA['fsal'];
        # mutates and returns the same export dict.
        schema_fsal_info = {}
        for key in export['fsal'].keys():
            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
                schema_fsal_info[key] = export['fsal'][key]
        export['fsal'] = schema_fsal_info
        return export

    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self) -> List[Dict[str, Any]]:
        # Comprehension instead of the manual append loop; each export is
        # filtered through the schema before being returned.
        return [self._get_schema_export(export)
                for export in mgr.remote('nfs', 'export_ls')]

    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def create(self, path, cluster_id, pseudo, access_type,
               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        # Creates an export; raises DashboardException if the pseudo path is
        # taken and NFSException if mgr/nfs rejects the export definition.
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
                                     component='nfs')
        # Fix: ``fsal`` is a dict (it is pop()'d and JSON-serialized below),
        # so key presence must be tested with ``in``. The previous
        # ``hasattr(fsal, 'user_id')`` never matched a dict key, making the
        # strip dead code.
        if 'user_id' in fsal:
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if ret == 0:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Export creation failed {err}")

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (str, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
        # Returns the schema-filtered export, or None when it does not exist.
        export_id = int(export_id)  # path parameters arrive as strings
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if export:
            export = self._get_schema_export(export)

        return export

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def set(self, cluster_id, export_id, path, pseudo, access_type,
            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        # Updates an existing export; raises NFSException on failure.
        # Same dict-key fix as in create(): ``hasattr`` cannot see dict keys.
        if 'user_id' in fsal:
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'export_id': export_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }

        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if ret == 0:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Failed to update export: {err}")

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 })
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def delete(self, cluster_id, export_id):
        # Deletes an export; raises a 404 DashboardException if it is absent.
        export_id = int(export_id)

        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if not export:
            raise DashboardException(
                http_status_code=404,
                msg=f'Export with id {export_id} not found.',
                component='nfs')
        # mgr/nfs removes exports by pseudo path rather than numeric id.
        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
217
218
@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
    """UI-only helper endpoints backing the dashboard's NFS-Ganesha pages."""

    @Endpoint('GET', '/fsals')
    @ReadPermission
    def fsals(self):
        # FSAL backends supported by mgr/nfs.
        return NFS_GANESHA_SUPPORTED_FSALS

    @Endpoint('GET', '/lsdir')
    @ReadPermission
    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
        """List directory paths of *fs_name* up to *depth* levels (capped at 5).

        Returns ``{'paths': [...]}``; an unknown/forbidden root yields an
        empty list. Raises DashboardException (400) on an invalid depth.
        """
        if root_dir is None:
            root_dir = "/"
        if not root_dir.startswith('/'):
            root_dir = '/{}'.format(root_dir)
        root_dir = os.path.normpath(root_dir)

        # Straight-line validation instead of raising from a ``finally``
        # block. The old code also hit a NameError (``error_msg`` unbound)
        # when int() raised TypeError, e.g. for a repeated query parameter
        # delivered as a list — now handled explicitly.
        try:
            depth = int(depth)
        except (TypeError, ValueError):
            raise DashboardException(code=400,
                                     component='nfs',
                                     msg='`depth` must be an integer.')
        if depth < 0:
            raise DashboardException(code=400,
                                     component='nfs',
                                     msg='`depth` must be greater or equal to 0.')
        if depth > 5:
            logger.warning("Limiting depth to maximum value of 5: "
                           "input depth=%s", depth)
            depth = 5

        try:
            cfs = CephFS(fs_name)
            paths = [root_dir]
            paths.extend([p['path'].rstrip('/')
                          for p in cfs.ls_dir(root_dir, depth)])
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            # Best-effort listing: unknown or unreadable roots are not errors.
            paths = []
        return {'paths': paths}

    @Endpoint('GET', '/cephfs/filesystems')
    @ReadPermission
    def filesystems(self):
        # CephFS filesystems available as export backends.
        return CephFS.list_filesystems()

    @Endpoint()
    @ReadPermission
    def status(self):
        """Report whether the mgr/nfs module is reachable from the dashboard."""
        status = {'available': True, 'message': None}
        try:
            mgr.remote('nfs', 'cluster_ls')
        except (ImportError, RuntimeError) as error:
            # mgr.remote raises ImportError when the nfs module is not
            # loaded; surface the reason instead of failing the page.
            logger.exception(error)
            status['available'] = False
            status['message'] = str(error)  # type: ignore

        return status
277 return status