1 # -*- coding: utf-8 -*-
2 from __future__
import absolute_import
6 from functools
import partial
11 from ..security
import Scope
12 from ..services
.cephfs
import CephFS
13 from ..services
.cephx
import CephX
14 from ..services
.exception
import DashboardException
, serialize_dashboard_exception
15 from ..services
.ganesha
import Ganesha
, GaneshaConf
, NFSException
16 from ..services
.rgw_client
import NoCredentialsException
, \
17 NoRgwDaemonsException
, RequestException
, RgwClient
18 from . import ApiController
, BaseController
, ControllerDoc
, Endpoint
, \
19 EndpointDoc
, ReadPermission
, RESTController
, Task
, UiApiController
21 logger
= logging
.getLogger('controllers.ganesha')
24 # documentation helpers
26 'export_id': (int, 'Export ID'),
27 'path': (str, 'Export path'),
28 'cluster_id': (str, 'Cluster identifier'),
29 'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
30 'pseudo': (str, 'Pseudo FS path'),
31 'tag': (str, 'NFSv3 export tag'),
32 'access_type': (str, 'Export access type'),
33 'squash': (str, 'Export squash policy'),
34 'security_label': (str, 'Security label'),
35 'protocols': ([int], 'List of protocol types'),
36 'transports': ([str], 'List of transport types'),
38 'name': (str, 'name of FSAL'),
39 'user_id': (str, 'CephX user id', True),
40 'filesystem': (str, 'CephFS filesystem ID', True),
41 'sec_label_xattr': (str, 'Name of xattr for security label', True),
42 'rgw_user_id': (str, 'RGW user id', True)
43 }, 'FSAL configuration'),
45 'addresses': ([str], 'list of IP addresses'),
46 'access_type': (str, 'Client access type'),
47 'squash': (str, 'Client squash policy')
48 }], 'List of client configurations'),
52 CREATE_EXPORT_SCHEMA
= {
53 'path': (str, 'Export path'),
54 'cluster_id': (str, 'Cluster identifier'),
55 'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
56 'pseudo': (str, 'Pseudo FS path'),
57 'tag': (str, 'NFSv3 export tag'),
58 'access_type': (str, 'Export access type'),
59 'squash': (str, 'Export squash policy'),
60 'security_label': (str, 'Security label'),
61 'protocols': ([int], 'List of protocol types'),
62 'transports': ([str], 'List of transport types'),
64 'name': (str, 'name of FSAL'),
65 'user_id': (str, 'CephX user id', True),
66 'filesystem': (str, 'CephFS filesystem ID', True),
67 'sec_label_xattr': (str, 'Name of xattr for security label', True),
68 'rgw_user_id': (str, 'RGW user id', True)
69 }, 'FSAL configuration'),
71 'addresses': ([str], 'list of IP addresses'),
72 'access_type': (str, 'Client access type'),
73 'squash': (str, 'Client squash policy')
74 }], 'List of client configurations'),
75 'reload_daemons': (bool,
76 'Trigger reload of NFS-Ganesha daemons configuration',
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory running the wrapped handler as an ``nfs/*`` Task.

    The decorated callable executes as a dashboard ``Task`` named
    ``nfs/<name>``; exceptions raised by it are serialized with
    ``serialize_dashboard_exception`` including the HTTP status.
    """
    def decorate(handler):
        # Serializer is fixed up-front so every task instance reports
        # failures the same way (with HTTP status information).
        exception_serializer = partial(serialize_dashboard_exception,
                                       include_http_status=True)
        task = Task("nfs/{}".format(name), metadata, wait_for,
                    exception_serializer)
        return task(handler)
    return decorate
90 @ApiController('/nfs-ganesha', Scope
.NFS_GANESHA
)
91 @ControllerDoc("NFS-Ganesha Management API", "NFS-Ganesha")
92 class NFSGanesha(RESTController
):
94 @EndpointDoc("Status of NFS-Ganesha management feature",
96 'available': (bool, "Is API available?"),
97 'message': (str, "Error message")
102 status
= {'available': True, 'message': None}
104 Ganesha
.get_ganesha_clusters()
105 except NFSException
as e
:
106 status
['message'] = str(e
) # type: ignore
107 status
['available'] = False
112 @ApiController('/nfs-ganesha/export', Scope
.NFS_GANESHA
)
113 @ControllerDoc(group
="NFS-Ganesha")
114 class NFSGaneshaExports(RESTController
):
115 RESOURCE_ID
= "cluster_id/export_id"
117 @EndpointDoc("List all NFS-Ganesha exports",
118 responses
={200: [EXPORT_SCHEMA
]})
121 for cluster_id
in Ganesha
.get_ganesha_clusters():
124 for export
in GaneshaConf
.instance(cluster_id
).list_exports()])
127 @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
128 'cluster_id': '{cluster_id}'}, 2.0)
129 @EndpointDoc("Creates a new NFS-Ganesha export",
130 parameters
=CREATE_EXPORT_SCHEMA
,
131 responses
={201: EXPORT_SCHEMA
})
132 def create(self
, path
, cluster_id
, daemons
, pseudo
, tag
, access_type
,
133 squash
, security_label
, protocols
, transports
, fsal
, clients
,
134 reload_daemons
=True):
135 if fsal
['name'] not in Ganesha
.fsals_available():
136 raise NFSException("Cannot create this export. "
137 "FSAL '{}' cannot be managed by the dashboard."
138 .format(fsal
['name']))
140 ganesha_conf
= GaneshaConf
.instance(cluster_id
)
141 ex_id
= ganesha_conf
.create_export({
144 'cluster_id': cluster_id
,
147 'access_type': access_type
,
149 'security_label': security_label
,
150 'protocols': protocols
,
151 'transports': transports
,
156 ganesha_conf
.reload_daemons(daemons
)
157 return ganesha_conf
.get_export(ex_id
).to_dict()
159 @EndpointDoc("Get an NFS-Ganesha export",
161 'cluster_id': (str, 'Cluster identifier'),
162 'export_id': (int, "Export ID")
164 responses
={200: EXPORT_SCHEMA
})
165 def get(self
, cluster_id
, export_id
):
166 export_id
= int(export_id
)
167 ganesha_conf
= GaneshaConf
.instance(cluster_id
)
168 if not ganesha_conf
.has_export(export_id
):
169 raise cherrypy
.HTTPError(404)
170 return ganesha_conf
.get_export(export_id
).to_dict()
172 @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
174 @EndpointDoc("Updates an NFS-Ganesha export",
175 parameters
=dict(export_id
=(int, "Export ID"),
176 **CREATE_EXPORT_SCHEMA
),
177 responses
={200: EXPORT_SCHEMA
})
178 def set(self
, cluster_id
, export_id
, path
, daemons
, pseudo
, tag
, access_type
,
179 squash
, security_label
, protocols
, transports
, fsal
, clients
,
180 reload_daemons
=True):
181 export_id
= int(export_id
)
182 ganesha_conf
= GaneshaConf
.instance(cluster_id
)
184 if not ganesha_conf
.has_export(export_id
):
185 raise cherrypy
.HTTPError(404) # pragma: no cover - the handling is too obvious
187 if fsal
['name'] not in Ganesha
.fsals_available():
188 raise NFSException("Cannot make modifications to this export. "
189 "FSAL '{}' cannot be managed by the dashboard."
190 .format(fsal
['name']))
192 old_export
= ganesha_conf
.update_export({
193 'export_id': export_id
,
195 'cluster_id': cluster_id
,
199 'access_type': access_type
,
201 'security_label': security_label
,
202 'protocols': protocols
,
203 'transports': transports
,
207 daemons
= list(daemons
)
208 for d_id
in old_export
.daemons
:
209 if d_id
not in daemons
:
212 ganesha_conf
.reload_daemons(daemons
)
213 return ganesha_conf
.get_export(export_id
).to_dict()
215 @NfsTask('delete', {'cluster_id': '{cluster_id}',
216 'export_id': '{export_id}'}, 2.0)
217 @EndpointDoc("Deletes an NFS-Ganesha export",
219 'cluster_id': (str, 'Cluster identifier'),
220 'export_id': (int, "Export ID"),
221 'reload_daemons': (bool,
222 'Trigger reload of NFS-Ganesha daemons'
226 def delete(self
, cluster_id
, export_id
, reload_daemons
=True):
227 export_id
= int(export_id
)
228 ganesha_conf
= GaneshaConf
.instance(cluster_id
)
230 if not ganesha_conf
.has_export(export_id
):
231 raise cherrypy
.HTTPError(404) # pragma: no cover - the handling is too obvious
232 export
= ganesha_conf
.remove_export(export_id
)
234 ganesha_conf
.reload_daemons(export
.daemons
)
237 @ApiController('/nfs-ganesha/daemon', Scope
.NFS_GANESHA
)
238 @ControllerDoc(group
="NFS-Ganesha")
239 class NFSGaneshaService(RESTController
):
241 @EndpointDoc("List NFS-Ganesha daemons information",
243 'daemon_id': (str, 'Daemon identifier'),
244 'cluster_id': (str, 'Cluster identifier'),
245 'cluster_type': (str, 'Cluster type'),
246 'status': (int, 'Status of daemon', True),
247 'desc': (str, 'Status description', True)
251 for cluster_id
in Ganesha
.get_ganesha_clusters():
252 result
.extend(GaneshaConf
.instance(cluster_id
).list_daemons())
256 @UiApiController('/nfs-ganesha', Scope
.NFS_GANESHA
)
257 class NFSGaneshaUi(BaseController
):
258 @Endpoint('GET', '/cephx/clients')
260 def cephx_clients(self
):
261 return list(CephX
.list_clients())
263 @Endpoint('GET', '/fsals')
266 return Ganesha
.fsals_available()
268 @Endpoint('GET', '/lsdir')
270 def lsdir(self
, fs_name
, root_dir
=None, depth
=1): # pragma: no cover
273 if not root_dir
.startswith('/'):
274 root_dir
= '/{}'.format(root_dir
)
275 root_dir
= os
.path
.normpath(root_dir
)
281 error_msg
= '`depth` must be greater or equal to 0.'
283 logger
.warning("Limiting depth to maximum value of 5: "
284 "input depth=%s", depth
)
287 error_msg
= '`depth` must be an integer.'
290 raise DashboardException(code
=400,
291 component
='nfsganesha',
295 cfs
= CephFS(fs_name
)
297 paths
.extend([p
['path'].rstrip('/')
298 for p
in cfs
.ls_dir(root_dir
, depth
)])
299 except (cephfs
.ObjectNotFound
, cephfs
.PermissionError
):
301 return {'paths': paths
}
    @Endpoint('GET', '/cephfs/filesystems')
    def filesystems(self):
        """Return the CephFS filesystems reported by the cluster.

        Thin delegation to ``CephFS.list_filesystems()``; the result is
        returned unchanged for the UI to consume.
        """
        return CephFS.list_filesystems()
308 @Endpoint('GET', '/rgw/buckets')
310 def buckets(self
, user_id
=None):
312 return RgwClient
.instance(user_id
).get_buckets()
313 except (DashboardException
, NoCredentialsException
, RequestException
,
314 NoRgwDaemonsException
):
317 @Endpoint('GET', '/clusters')
320 return Ganesha
.get_ganesha_clusters()