# -*- coding: utf-8 -*-
from __future__ import absolute_import

from functools import partial

import cephfs
import cherrypy

from . import ApiController, RESTController, UiApiController, BaseController, \
    Endpoint, Task, ReadPermission, ControllerDoc, EndpointDoc
from .. import logger
from ..security import Scope
from ..services.cephfs import CephFS
from ..services.cephx import CephX
from ..services.exception import serialize_dashboard_exception
from ..services.ganesha import Ganesha, GaneshaConf, NFSException
from ..services.rgw_client import RgwClient
# Documentation helpers: parameter/response schemas consumed by the
# @EndpointDoc decorators below. Entries are (type, description[, optional]).
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
    'pseudo': (str, 'Pseudo FS path'),
    'tag': (str, 'NFSv3 export tag'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'user_id': (str, 'CephX user id', True),
        'filesystem': (str, 'CephFS filesystem ID', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'rgw_user_id': (str, 'RGW user id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
}
# Schema for export creation/update requests. Same shape as EXPORT_SCHEMA but
# without the server-assigned 'export_id', plus the 'reload_daemons' flag.
CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
    'pseudo': (str, 'Pseudo FS path'),
    'tag': (str, 'NFSv3 export tag'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'user_id': (str, 'CephX user id', True),
        'filesystem': (str, 'CephFS filesystem ID', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'rgw_user_id': (str, 'RGW user id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations'),
    'reload_daemons': (bool,
                       'Trigger reload of NFS-Ganesha daemons configuration',
                       True)
}
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):
    """Decorator factory wrapping an endpoint into a dashboard Task.

    The task is registered under the name ``nfs/<name>``. Exceptions raised
    by the wrapped endpoint are serialized with
    ``serialize_dashboard_exception`` (with the HTTP status included).

    :param name: Task name suffix, appended to the "nfs/" prefix.
    :param metadata: Task metadata (may contain format placeholders).
    :param wait_for: Seconds to wait for task completion before returning.
    """
    def decorate(func):
        # Serialize exceptions consistently with the rest of the dashboard,
        # carrying the HTTP status code along.
        exception_handler = partial(serialize_dashboard_exception,
                                    include_http_status=True)
        task = Task("nfs/{}".format(name), metadata, wait_for,
                    exception_handler)
        return task(func)
    return decorate
@ApiController('/nfs-ganesha', Scope.NFS_GANESHA)
@ControllerDoc("NFS-Ganesha Management API", "NFS-Ganesha")
class NFSGanesha(RESTController):
    """Availability check for the NFS-Ganesha management feature."""

    @EndpointDoc("Status of NFS-Ganesha management feature",
                 responses={200: {
                     'available': (bool, "Is API available?"),
                     'message': (str, "Error message")
                 }})
    @Endpoint()
    @ReadPermission
    def status(self):
        """Return whether the NFS-Ganesha API is usable and why not if so."""
        status = {'available': True, 'message': None}
        try:
            # Probing the cluster list is enough to detect a broken or
            # missing NFS-Ganesha configuration.
            Ganesha.get_ganesha_clusters()
        except NFSException as e:
            status['message'] = str(e)
            status['available'] = False
        return status
@ApiController('/nfs-ganesha/export', Scope.NFS_GANESHA)
@ControllerDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """CRUD REST controller for NFS-Ganesha exports."""
    RESOURCE_ID = "cluster_id/export_id"

    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self):
        """Return all exports of all known Ganesha clusters as dicts."""
        result = []
        for cluster_id in Ganesha.get_ganesha_clusters():
            result.extend(
                [export.to_dict() for export
                 in GaneshaConf.instance(cluster_id).list_exports()])
        return result

    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    def create(self, path, cluster_id, daemons, pseudo, tag, access_type,
               squash, security_label, protocols, transports, fsal, clients,
               reload_daemons=True):
        """Create an export and optionally reload the affected daemons.

        :raises NFSException: if the requested FSAL is not manageable.
        """
        if fsal['name'] not in Ganesha.fsals_available():
            raise NFSException("Cannot create this export. "
                               "FSAL '{}' cannot be managed by the dashboard."
                               .format(fsal['name']))

        ganesha_conf = GaneshaConf.instance(cluster_id)
        ex_id = ganesha_conf.create_export({
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'daemons': daemons,
            'tag': tag,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        })
        if reload_daemons:
            ganesha_conf.reload_daemons(daemons)
        return ganesha_conf.get_export(ex_id).to_dict()

    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id):
        """Return a single export as a dict; 404 if it does not exist."""
        export_id = int(export_id)
        ganesha_conf = GaneshaConf.instance(cluster_id)
        if not ganesha_conf.has_export(export_id):
            raise cherrypy.HTTPError(404)
        return ganesha_conf.get_export(export_id).to_dict()

    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    def set(self, cluster_id, export_id, path, daemons, pseudo, tag,
            access_type, squash, security_label, protocols, transports, fsal,
            clients, reload_daemons=True):
        """Update an export; 404 if missing, NFSException for unmanaged FSAL."""
        export_id = int(export_id)
        ganesha_conf = GaneshaConf.instance(cluster_id)

        if not ganesha_conf.has_export(export_id):
            raise cherrypy.HTTPError(404)

        if fsal['name'] not in Ganesha.fsals_available():
            raise NFSException("Cannot make modifications to this export. "
                               "FSAL '{}' cannot be managed by the dashboard."
                               .format(fsal['name']))

        old_export = ganesha_conf.update_export({
            'export_id': export_id,
            'path': path,
            'cluster_id': cluster_id,
            'daemons': daemons,
            'pseudo': pseudo,
            'tag': tag,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        })
        # Daemons that previously served the export but no longer do must
        # also be reloaded so they drop the stale configuration.
        daemons = list(daemons)
        for d_id in old_export.daemons:
            if d_id not in daemons:
                daemons.append(d_id)
        if reload_daemons:
            ganesha_conf.reload_daemons(daemons)
        return ganesha_conf.get_export(export_id).to_dict()

    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID"),
                     'reload_daemons': (bool,
                                        'Trigger reload of NFS-Ganesha daemons'
                                        ' configuration',
                                        True)
                 })
    def delete(self, cluster_id, export_id, reload_daemons=True):
        """Remove an export; 404 if it does not exist."""
        export_id = int(export_id)
        ganesha_conf = GaneshaConf.instance(cluster_id)

        if not ganesha_conf.has_export(export_id):
            raise cherrypy.HTTPError(404)

        export = ganesha_conf.remove_export(export_id)
        if reload_daemons:
            ganesha_conf.reload_daemons(export.daemons)
@ApiController('/nfs-ganesha/daemon')
@ControllerDoc(group="NFS-Ganesha")
class NFSGaneshaService(RESTController):
    """REST controller listing NFS-Ganesha daemons and their status."""

    @EndpointDoc("List NFS-Ganesha daemons information",
                 responses={200: [{
                     'daemon_id': (str, 'Daemon identifier'),
                     'cluster_id': (str, 'Cluster identifier'),
                     'status': (int,
                                'Status of daemon (1=RUNNING, 0=STOPPED, -1=ERROR',
                                True),
                     'desc': (str, 'Error description (if status==-1)', True)
                 }]})
    def list(self):
        """Return one entry per daemon, with status when it is available."""
        status_dict = Ganesha.get_daemons_status()
        if status_dict:
            # The cluster_id clause must be the outer loop: it has to be
            # bound before `status_dict[cluster_id]` can be evaluated.
            return [
                {
                    'daemon_id': daemon_id,
                    'cluster_id': cluster_id,
                    'status': status_dict[cluster_id][daemon_id]['status'],
                    'desc': status_dict[cluster_id][daemon_id]['desc']
                }
                for cluster_id in status_dict
                for daemon_id in status_dict[cluster_id]
            ]

        # No status information available: fall back to the daemon list
        # stored in the Ganesha configuration.
        result = []
        for cluster_id in Ganesha.get_ganesha_clusters():
            result.extend(
                [{'daemon_id': daemon_id, 'cluster_id': cluster_id}
                 for daemon_id
                 in GaneshaConf.instance(cluster_id).list_daemons()])
        return result
@UiApiController('/nfs-ganesha')
class NFSGaneshaUi(BaseController):
    """Helper endpoints backing the NFS-Ganesha dashboard UI forms."""

    @Endpoint('GET', '/cephx/clients')
    def cephx_clients(self):
        """Return the list of available CephX client ids."""
        return [client for client in CephX.list_clients()]

    @Endpoint('GET', '/fsals')
    def fsals(self):
        """Return the FSALs that can be managed by the dashboard."""
        return Ganesha.fsals_available()

    @Endpoint('GET', '/lsdir')
    def lsdir(self, root_dir=None, depth=1):
        """List CephFS directory paths under root_dir up to `depth` levels.

        Returns ``{'paths': [...]}``; unknown or unreadable directories
        yield an empty list instead of an error.
        """
        if root_dir is None:
            root_dir = "/"
        depth = int(depth)
        if depth > 5:
            logger.warning("[NFS] Limiting depth to maximum value of 5: "
                           "input depth=%s", depth)
            depth = 5
        # Normalize to a trailing slash so the root itself can be filtered
        # out of the result below.
        root_dir = '{}/'.format(root_dir) \
            if not root_dir.endswith('/') else root_dir

        try:
            cfs = CephFS()
            paths = cfs.get_dir_list(root_dir, depth)
            # Strip trailing slashes and drop the root entry itself.
            paths = [p[:-1] for p in paths if p != root_dir]
            return {'paths': paths}
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            return {'paths': []}

    @Endpoint('GET', '/cephfs/filesystems')
    def filesystems(self):
        """Return the list of CephFS filesystems."""
        return CephFS.list_filesystems()

    @Endpoint('GET', '/rgw/buckets')
    def buckets(self, user_id=None):
        """Return the RGW buckets visible to the given (or default) user."""
        return RgwClient.instance(user_id).get_buckets()

    @Endpoint('GET', '/clusters')
    def clusters(self):
        """Return the list of known NFS-Ganesha clusters."""
        return Ganesha.get_ganesha_clusters()