1 # -*- coding: utf-8 -*-
6 from functools
import partial
7 from typing
import Any
, Dict
, List
, Optional
10 from mgr_module
import NFS_GANESHA_SUPPORTED_FSALS
13 from ..security
import Scope
14 from ..services
.cephfs
import CephFS
15 from ..services
.exception
import DashboardException
, serialize_dashboard_exception
16 from . import APIDoc
, APIRouter
, BaseController
, Endpoint
, EndpointDoc
, \
17 ReadPermission
, RESTController
, Task
, UIRouter
18 from ._version
import APIVersion
# Module-level logger for the NFS-Ganesha dashboard controllers.
logger = logging.getLogger('controllers.nfs')
class NFSException(DashboardException):
    """Dashboard exception for NFS-specific failures.

    Tags the underlying ``DashboardException`` with the ``nfs`` component
    so errors are attributed to the NFS-Ganesha subsystem.
    """

    def __init__(self, msg):
        # Zero-argument super() is the modern Py3 spelling of
        # super(NFSException, self); behavior is identical.
        super().__init__(component="nfs", msg=msg)
# documentation helpers

# Response schema for a single export, as exposed by the REST API.
# Tuple entries are (type, description[, optional]).
EXPORT_SCHEMA = {
    'export_id': (int, 'Export ID'),
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True),
        'user_id': (str, 'User id', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
# Request schema for export creation; like EXPORT_SCHEMA but without the
# server-assigned 'export_id' and without a caller-supplied FSAL 'user_id'.
CREATE_EXPORT_SCHEMA = {
    'path': (str, 'Export path'),
    'cluster_id': (str, 'Cluster identifier'),
    'pseudo': (str, 'Pseudo FS path'),
    'access_type': (str, 'Export access type'),
    'squash': (str, 'Export squash policy'),
    'security_label': (str, 'Security label'),
    'protocols': ([int], 'List of protocol types'),
    'transports': ([str], 'List of transport types'),
    'fsal': ({
        'name': (str, 'name of FSAL'),
        'fs_name': (str, 'CephFS filesystem name', True),
        'sec_label_xattr': (str, 'Name of xattr for security label', True)
    }, 'FSAL configuration'),
    'clients': ([{
        'addresses': ([str], 'list of IP addresses'),
        'access_type': (str, 'Client access type'),
        'squash': (str, 'Client squash policy')
    }], 'List of client configurations')
}
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory that wraps a handler in an ``nfs/<name>`` Task.

    Exceptions raised by the wrapped handler are serialized through
    serialize_dashboard_exception, including the HTTP status code.
    """
    def composed_decorator(func):
        exception_handler = partial(serialize_dashboard_exception,
                                    include_http_status=True)
        task = Task("nfs/{}".format(name), metadata, wait_for,
                    exception_handler)
        return task(func)
    return composed_decorator
@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        """Return the identifiers of all NFS clusters known to the nfs
        mgr module."""
        return mgr.remote('nfs', 'cluster_ls')
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    # Composite resource identifier the REST framework uses to build
    # per-export URLs (/{cluster_id}/{export_id}).
    RESOURCE_ID = "cluster_id/export_id"
99 def _get_schema_export(export
: Dict
[str, Any
]) -> Dict
[str, Any
]:
101 Method that avoids returning export info not exposed in the export schema
102 e.g., rgw user access/secret keys.
104 schema_fsal_info
= {}
105 for key
in export
['fsal'].keys():
106 if key
in EXPORT_SCHEMA
['fsal'][0].keys(): # type: ignore
107 schema_fsal_info
[key
] = export
['fsal'][key
]
108 export
['fsal'] = schema_fsal_info
111 @EndpointDoc("List all NFS-Ganesha exports",
112 responses
={200: [EXPORT_SCHEMA
]})
113 def list(self
) -> List
[Dict
[str, Any
]]:
115 for export
in mgr
.remote('nfs', 'export_ls'):
116 exports
.append(self
._get
_schema
_export
(export
))
120 @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
121 'cluster_id': '{cluster_id}'}, 2.0)
122 @EndpointDoc("Creates a new NFS-Ganesha export",
123 parameters
=CREATE_EXPORT_SCHEMA
,
124 responses
={201: EXPORT_SCHEMA
})
125 @RESTController.MethodMap(version
=APIVersion(2, 0)) # type: ignore
126 def create(self
, path
, cluster_id
, pseudo
, access_type
,
127 squash
, security_label
, protocols
, transports
, fsal
, clients
) -> Dict
[str, Any
]:
128 export_mgr
= mgr
.remote('nfs', 'fetch_nfs_export_obj')
129 if export_mgr
.get_export_by_pseudo(cluster_id
, pseudo
):
130 raise DashboardException(msg
=f
'Pseudo {pseudo} is already in use.',
132 if hasattr(fsal
, 'user_id'):
133 fsal
.pop('user_id') # mgr/nfs does not let you customize user_id
137 'cluster_id': cluster_id
,
138 'access_type': access_type
,
140 'security_label': security_label
,
141 'protocols': protocols
,
142 'transports': transports
,
146 ret
, _
, err
= export_mgr
.apply_export(cluster_id
, json
.dumps(raw_ex
))
148 return self
._get
_schema
_export
(
149 export_mgr
.get_export_by_pseudo(cluster_id
, pseudo
))
150 raise NFSException(f
"Export creation failed {err}")
152 @EndpointDoc("Get an NFS-Ganesha export",
154 'cluster_id': (str, 'Cluster identifier'),
155 'export_id': (str, "Export ID")
157 responses
={200: EXPORT_SCHEMA
})
158 def get(self
, cluster_id
, export_id
) -> Optional
[Dict
[str, Any
]]:
159 export_id
= int(export_id
)
160 export
= mgr
.remote('nfs', 'export_get', cluster_id
, export_id
)
162 export
= self
._get
_schema
_export
(export
)
166 @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
168 @EndpointDoc("Updates an NFS-Ganesha export",
169 parameters
=dict(export_id
=(int, "Export ID"),
170 **CREATE_EXPORT_SCHEMA
),
171 responses
={200: EXPORT_SCHEMA
})
172 @RESTController.MethodMap(version
=APIVersion(2, 0)) # type: ignore
173 def set(self
, cluster_id
, export_id
, path
, pseudo
, access_type
,
174 squash
, security_label
, protocols
, transports
, fsal
, clients
) -> Dict
[str, Any
]:
176 if hasattr(fsal
, 'user_id'):
177 fsal
.pop('user_id') # mgr/nfs does not let you customize user_id
181 'cluster_id': cluster_id
,
182 'export_id': export_id
,
183 'access_type': access_type
,
185 'security_label': security_label
,
186 'protocols': protocols
,
187 'transports': transports
,
192 export_mgr
= mgr
.remote('nfs', 'fetch_nfs_export_obj')
193 ret
, _
, err
= export_mgr
.apply_export(cluster_id
, json
.dumps(raw_ex
))
195 return self
._get
_schema
_export
(
196 export_mgr
.get_export_by_pseudo(cluster_id
, pseudo
))
197 raise NFSException(f
"Failed to update export: {err}")
199 @NfsTask('delete', {'cluster_id': '{cluster_id}',
200 'export_id': '{export_id}'}, 2.0)
201 @EndpointDoc("Deletes an NFS-Ganesha export",
203 'cluster_id': (str, 'Cluster identifier'),
204 'export_id': (int, "Export ID")
206 @RESTController.MethodMap(version
=APIVersion(2, 0)) # type: ignore
207 def delete(self
, cluster_id
, export_id
):
208 export_id
= int(export_id
)
210 export
= mgr
.remote('nfs', 'export_get', cluster_id
, export_id
)
212 raise DashboardException(
213 http_status_code
=404,
214 msg
=f
'Export with id {export_id} not found.',
216 mgr
.remote('nfs', 'export_rm', cluster_id
, export
['pseudo'])
# UI-support endpoints (not part of the public REST API) backing the
# NFS-Ganesha dashboard forms.
@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
221 @Endpoint('GET', '/fsals')
224 return NFS_GANESHA_SUPPORTED_FSALS
226 @Endpoint('GET', '/lsdir')
228 def lsdir(self
, fs_name
, root_dir
=None, depth
=1): # pragma: no cover
231 if not root_dir
.startswith('/'):
232 root_dir
= '/{}'.format(root_dir
)
233 root_dir
= os
.path
.normpath(root_dir
)
239 error_msg
= '`depth` must be greater or equal to 0.'
241 logger
.warning("Limiting depth to maximum value of 5: "
242 "input depth=%s", depth
)
245 error_msg
= '`depth` must be an integer.'
248 raise DashboardException(code
=400,
253 cfs
= CephFS(fs_name
)
255 paths
.extend([p
['path'].rstrip('/')
256 for p
in cfs
.ls_dir(root_dir
, depth
)])
257 except (cephfs
.ObjectNotFound
, cephfs
.PermissionError
):
259 return {'paths': paths
}
261 @Endpoint('GET', '/cephfs/filesystems')
263 def filesystems(self
):
264 return CephFS
.list_filesystems()
269 status
= {'available': True, 'message': None}
271 mgr
.remote('nfs', 'cluster_ls')
272 except (ImportError, RuntimeError) as error
:
273 logger
.exception(error
)
274 status
['available'] = False
275 status
['message'] = str(error
) # type: ignore