# -*- coding: utf-8 -*-
import json
import logging
import os
from functools import partial
from typing import Any, Dict, List, Optional

import cephfs
from mgr_module import NFS_GANESHA_SUPPORTED_FSALS

from .. import mgr
from ..security import Scope
from ..services.cephfs import CephFS
from ..services.exception import DashboardException, handle_cephfs_error, \
    serialize_dashboard_exception
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
    ReadPermission, RESTController, Task, UIRouter
from ._version import APIVersion
# Module-level logger shared by the NFS controllers below.
logger = logging.getLogger('controllers.nfs')
class NFSException(DashboardException):
    """Dashboard exception raised by the NFS controllers.

    Always tagged with component ``"nfs"`` so the frontend can route the
    error to the right UI section.
    """

    def __init__(self, msg):
        super().__init__(component="nfs", msg=msg)
29 # documentation helpers
# Schema of an export as *returned* by the API. Entries are
# ``name: (type, description[, optional])``; nested dicts describe the
# 'fsal' sub-object and the per-client configuration list.
# NOTE: unlike CREATE_EXPORT_SCHEMA below, this includes the read-only
# 'export_id' and the FSAL 'user_id'.
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'user_id': (str, 'User id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
# Schema of the payload *accepted* when creating/updating an export.
# Same shape as EXPORT_SCHEMA minus the server-assigned 'export_id' and
# the FSAL 'user_id' (mgr/nfs does not let callers choose the user id).
CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory that runs the endpoint as a ``nfs/<name>`` Task.

    Exceptions raised by the wrapped endpoint are serialized with
    ``serialize_dashboard_exception`` (including the HTTP status code).
    """
    exception_serializer = partial(serialize_dashboard_exception,
                                   include_http_status=True)

    def decorate(func):
        task_name = "nfs/{}".format(name)
        return Task(task_name, metadata, wait_for, exception_serializer)(func)

    return decorate
@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    """List the NFS-Ganesha clusters known to the mgr/nfs module."""

    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        # Delegates entirely to the mgr/nfs module's cluster listing.
        return mgr.remote('nfs', 'cluster_ls')
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """CRUD API for NFS-Ganesha exports, backed by the mgr/nfs module."""

    RESOURCE_ID = "cluster_id/export_id"

    @staticmethod
    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
        """
        Method that avoids returning export info not exposed in the export schema
        e.g., rgw user access/secret keys.
        """
        # Keep only FSAL keys that are declared in EXPORT_SCHEMA['fsal'].
        schema_fsal_info = {}
        for key in export['fsal'].keys():
            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
                schema_fsal_info[key] = export['fsal'][key]
        export['fsal'] = schema_fsal_info
        return export

    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self) -> List[Dict[str, Any]]:
        """Return all exports of all clusters, filtered to the public schema."""
        exports = []
        for export in mgr.remote('nfs', 'export_ls'):
            exports.append(self._get_schema_export(export))
        return exports

    @handle_cephfs_error()
    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def create(self, path, cluster_id, pseudo, access_type,
               squash, security_label, protocols, transports,
               fsal, clients) -> Dict[str, Any]:
        """Create an export; raises if the pseudo path is already taken."""
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
                                     component='nfs')
        if hasattr(fsal, 'user_id'):
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        applied_exports = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if not applied_exports.has_error:
            # Re-fetch so the response reflects what mgr/nfs actually stored.
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Export creation failed {applied_exports.changes[0].msg}")

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (str, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
        """Return a single export, or None if it does not exist."""
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if export:
            export = self._get_schema_export(export)
        return export

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def set(self, cluster_id, export_id, path, pseudo, access_type,
            squash, security_label, protocols, transports,
            fsal, clients) -> Dict[str, Any]:
        """Update an existing export in place (identified by export_id)."""
        if hasattr(fsal, 'user_id'):
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'export_id': export_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        applied_exports = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if not applied_exports.has_error:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        # This is the edit path, so report an *update* failure (the original
        # message incorrectly said "creation").
        raise NFSException(f"Export update failed {applied_exports.changes[0].msg}")

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 })
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def delete(self, cluster_id, export_id):
        """Delete an export; 404 if no export with that id exists."""
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if not export:
            raise DashboardException(
                http_status_code=404,
                msg=f'Export with id {export_id} not found.',
                component='nfs')
        # mgr/nfs removes exports by pseudo path, not by numeric id.
        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
221 @UIRouter('/nfs-ganesha', Scope
.NFS_GANESHA
)
222 class NFSGaneshaUi(BaseController
):
223 @Endpoint('GET', '/fsals')
226 return NFS_GANESHA_SUPPORTED_FSALS
228 @Endpoint('GET', '/lsdir')
230 def lsdir(self
, fs_name
, root_dir
=None, depth
=1): # pragma: no cover
233 if not root_dir
.startswith('/'):
234 root_dir
= '/{}'.format(root_dir
)
235 root_dir
= os
.path
.normpath(root_dir
)
241 error_msg
= '`depth` must be greater or equal to 0.'
243 logger
.warning("Limiting depth to maximum value of 5: "
244 "input depth=%s", depth
)
247 error_msg
= '`depth` must be an integer.'
250 raise DashboardException(code
=400,
255 cfs
= CephFS(fs_name
)
257 paths
.extend([p
['path'].rstrip('/')
258 for p
in cfs
.ls_dir(root_dir
, depth
)])
259 except (cephfs
.ObjectNotFound
, cephfs
.PermissionError
):
261 return {'paths': paths
}
    # NOTE(review): sibling UI endpoints appear to carry a @ReadPermission
    # decorator; confirm whether one was lost here.
    @Endpoint('GET', '/cephfs/filesystems')
    def filesystems(self):
        """Expose the list of CephFS filesystems for the frontend UI."""
        return CephFS.list_filesystems()
271 status
= {'available': True, 'message': None}
273 mgr
.remote('nfs', 'cluster_ls')
274 except (ImportError, RuntimeError) as error
:
275 logger
.exception(error
)
276 status
['available'] = False
277 status
['message'] = str(error
) # type: ignore