# -*- coding: utf-8 -*-
from __future__ import absolute_import

import json
import logging
import os
from functools import partial
from typing import Any, Dict, List, Optional

import cephfs
from mgr_module import NFS_GANESHA_SUPPORTED_FSALS

from .. import mgr
from ..security import Scope
from ..services.cephfs import CephFS
from ..services.exception import DashboardException, serialize_dashboard_exception
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
    ReadPermission, RESTController, Task, UIRouter
from ._version import APIVersion

logger = logging.getLogger('controllers.nfs')


class NFSException(DashboardException):
    def __init__(self, msg):
        super(NFSException, self).__init__(component="nfs", msg=msg)


# documentation helpers
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'user_id': (str, 'User id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
}


CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}

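# Illustrative request body for NFSGaneshaExports.create(), matching
# CREATE_EXPORT_SCHEMA above. This is only a sketch: every value is made up,
# and valid 'fsal.name' values come from NFS_GANESHA_SUPPORTED_FSALS.
#
#   {
#       "path": "/",
#       "cluster_id": "mynfs",
#       "pseudo": "/cephfs",
#       "access_type": "RW",
#       "squash": "no_root_squash",
#       "security_label": "",
#       "protocols": [4],
#       "transports": ["TCP"],
#       "fsal": {"name": "CEPH", "fs_name": "myfs", "sec_label_xattr": ""},
#       "clients": []
#   }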

# pylint: disable=not-callable
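# Decorator factory: wraps a handler in a dashboard Task named "nfs/<name>"
# with the given metadata and wait_for timeout; exceptions raised by the
# handler are serialized via serialize_dashboard_exception, including their
# HTTP status.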
def NfsTask(name, metadata, wait_for):  # noqa: N802
    def composed_decorator(func):
        return Task("nfs/{}".format(name), metadata, wait_for,
                    partial(serialize_dashboard_exception,
                            include_http_status=True))(func)
    return composed_decorator


@APIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGanesha(RESTController):

    @EndpointDoc("Status of NFS-Ganesha management feature",
                 responses={200: {
                     'available': (bool, "Is API available?"),
                     'message': (str, "Error message")
                 }})
    @Endpoint()
    @ReadPermission
    def status(self):
        status = {'available': True, 'message': None}
        try:
            mgr.remote('nfs', 'cluster_ls')
        except ImportError as error:
            logger.exception(error)
            status['available'] = False
            status['message'] = str(error)  # type: ignore

        return status


@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        return mgr.remote('nfs', 'cluster_ls')

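# The controller below follows the dashboard's usual RESTController verb
# mapping: list() handles GET on the collection, create() handles POST, and
# get()/set()/delete() handle GET/PUT/DELETE on a single export addressed by
# RESOURCE_ID, i.e. .../export/{cluster_id}/{export_id} under the @APIRouter
# prefix.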

@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    RESOURCE_ID = "cluster_id/export_id"

    @staticmethod
    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
124 """
125 Method that avoids returning export info not exposed in the export schema
126 e.g., rgw user access/secret keys.
127 """
        schema_fsal_info = {}
        for key in export['fsal'].keys():
            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
                schema_fsal_info[key] = export['fsal'][key]
        export['fsal'] = schema_fsal_info
        return export

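    # For example, an RGW-backed export whose 'fsal' section carries credential
    # fields (names illustrative, e.g. 'access_key_id'/'secret_access_key')
    # is reduced by _get_schema_export() above to the keys declared in
    # EXPORT_SCHEMA['fsal'], so credentials are never returned to the caller.
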
    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self) -> List[Dict[str, Any]]:
        exports = []
        for export in mgr.remote('nfs', 'export_ls'):
            exports.append(self._get_schema_export(export))

        return exports

    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def create(self, path, cluster_id, pseudo, access_type,
               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
                                     component='nfs')
        if 'user_id' in fsal:
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if ret == 0:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Export creation failed: {err}")

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (str, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if export:
            export = self._get_schema_export(export)

        return export

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def set(self, cluster_id, export_id, path, pseudo, access_type,
            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:

        if 'user_id' in fsal:
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'export_id': export_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }

        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if ret == 0:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Failed to update export: {err}")

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 })
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def delete(self, cluster_id, export_id):
        export_id = int(export_id)

        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if not export:
            raise DashboardException(
                http_status_code=404,
                msg=f'Export with id {export_id} not found.',
                component='nfs')
        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])


@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
    @Endpoint('GET', '/fsals')
    @ReadPermission
    def fsals(self):
        return NFS_GANESHA_SUPPORTED_FSALS

    @Endpoint('GET', '/lsdir')
    @ReadPermission
    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
        if root_dir is None:
            root_dir = "/"
        if not root_dir.startswith('/'):
            root_dir = '/{}'.format(root_dir)
        root_dir = os.path.normpath(root_dir)

        try:
            depth = int(depth)
            error_msg = ''
            if depth < 0:
                error_msg = '`depth` must be greater than or equal to 0.'
            if depth > 5:
                logger.warning("Limiting depth to maximum value of 5: "
                               "input depth=%s", depth)
                depth = 5
        except ValueError:
            error_msg = '`depth` must be an integer.'
        finally:
            if error_msg:
                raise DashboardException(code=400,
                                         component='nfs',
                                         msg=error_msg)

        try:
            cfs = CephFS(fs_name)
            paths = [root_dir]
            paths.extend([p['path'].rstrip('/')
                          for p in cfs.ls_dir(root_dir, depth)])
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            paths = []
        return {'paths': paths}

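    # lsdir() above replies with the normalized root_dir plus any CephFS
    # subdirectories found up to `depth` levels below it, for example
    # (illustrative paths): {'paths': ['/', '/dir1', '/dir1/subdir']}
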
    @Endpoint('GET', '/cephfs/filesystems')
    @ReadPermission
    def filesystems(self):
        return CephFS.list_filesystems()