# ceph/src/pybind/mgr/nfs/export.py
import errno
import json
import logging

from typing import List
from os.path import isabs, normpath

from rados import TimedOut, ObjectNotFound

from .export_utils import GaneshaConfParser, Export
from .exception import NFSException, NFSInvalidOperation, NFSObjectNotFound, FSNotFound, \
    ClusterNotFound
from .utils import POOL_NAME, available_clusters, restart_nfs_service, check_fs

log = logging.getLogger(__name__)
def export_cluster_checker(func):
    """Decorator for ExportMgr commands taking a 'cluster_id' kwarg.

    Verifies the cluster exists before delegating to *func*, and pins
    the manager's rados namespace to that cluster id. Returns the usual
    (retcode, out, err) mgr tuple.
    """
    def cluster_check(fs_export, *args, **kwargs):
        """
        This method checks if cluster exists and sets rados namespace.
        """
        if kwargs['cluster_id'] not in available_clusters(fs_export.mgr):
            # Grammar fixed: was "does not exists"
            return -errno.ENOENT, "", "Cluster does not exist"
        fs_export.rados_namespace = kwargs['cluster_id']
        return func(fs_export, *args, **kwargs)
    return cluster_check
def exception_handler(exception_obj, log_msg=""):
    """Log *log_msg* with traceback and translate *exception_obj* into a
    (retcode, out, err) mgr command result tuple."""
    log.exception(log_msg)
    # Use the exception's own errno when it carries one, else -1.
    code = getattr(exception_obj, 'errno', -1)
    return code, "", str(exception_obj)
def __init__(self, mgr, namespace):
    """Bind this helper to the mgr rados handle, the shared NFS pool and
    the per-cluster *namespace*.

    Fix: the visible code only stored ``namespace`` although every other
    method dereferences ``self.mgr`` and ``self.pool``.
    """
    self.mgr = mgr
    self.pool = POOL_NAME
    self.namespace = namespace
41 def _make_rados_url(self
, obj
):
42 return "rados://{}/{}/{}".format(self
.pool
, self
.namespace
, obj
)
44 def _create_url_block(self
, obj_name
):
45 return {'block_name': '%url', 'value': self
._make
_rados
_url
(obj_name
)}
def write_obj(self, conf_block, obj, config_obj=''):
    """Serialize *conf_block* into rados object *obj*; when *config_obj*
    is given, also append the new object's %url to it and notify ganesha.

    Fix: the extraction dropped the early-return guard — the stranded
    "Return after creating empty common config object" comment shows a
    ``if not config_obj: return`` once sat there; without it the default
    call would append a url to an empty object name.
    """
    conf_block = GaneshaConfParser.write_block(conf_block)

    with self.mgr.rados.open_ioctx(self.pool) as ioctx:
        ioctx.set_namespace(self.namespace)
        ioctx.write_full(obj, conf_block.encode('utf-8'))
        if not config_obj:
            # Return after creating empty common config object
            return
        log.debug("write configuration into rados object "
                  f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")

        # Add created obj url to common config obj
        ioctx.append(config_obj, GaneshaConfParser.write_block(
            self._create_url_block(obj)).encode('utf-8'))
        ExportMgr._check_rados_notify(ioctx, config_obj)
        log.debug(f"Added {obj} url to {config_obj}")
def update_obj(self, conf_block, obj, config_obj):
    # Overwrite an existing export object with new configuration text and
    # notify ganesha via the common config object *config_obj*.
    # Unlike write_obj, no %url is appended: the url already exists.
    with self.mgr.rados.open_ioctx(self.pool) as ioctx:
        ioctx.set_namespace(self.namespace)
        ioctx.write_full(obj, conf_block.encode('utf-8'))
        log.debug("write configuration into rados object "
                  f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")
        # Wake up ganesha so it reloads the changed export.
        ExportMgr._check_rados_notify(ioctx, config_obj)
        log.debug(f"Update export {obj} in {config_obj}")
def remove_obj(self, obj, config_obj):
    # Delete export object *obj* and scrub its '%url ...' line out of the
    # common config object, then notify ganesha watchers.
    with self.mgr.rados.open_ioctx(self.pool) as ioctx:
        ioctx.set_namespace(self.namespace)
        export_urls = ioctx.read(config_obj)
        # Must match the exact text _create_url_block produced (trailing
        # blank line included) for the bytes-replace below to hit.
        url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
        export_urls = export_urls.replace(url.encode('utf-8'), b'')
        ioctx.remove_object(obj)
        ioctx.write_full(config_obj, export_urls)
        ExportMgr._check_rados_notify(ioctx, config_obj)
        log.debug("Object deleted: {}".format(url))
def remove_all_obj(self):
    """Delete every object in this cluster's rados namespace.

    Fix: the extraction dropped the loop body; each listed object is
    removed in place (reconstructed — confirm against upstream).
    """
    with self.mgr.rados.open_ioctx(self.pool) as ioctx:
        ioctx.set_namespace(self.namespace)
        for obj in ioctx.list_objects():
            obj.remove()
def check_user_config(self):
    """Return True if a user-supplied config object (``userconf-nfs*``)
    exists in this namespace, else False.

    Fix: the extraction dropped both return statements; reconstructed
    from the visible ``startswith("userconf-nfs")`` test.
    """
    with self.mgr.rados.open_ioctx(self.pool) as ioctx:
        ioctx.set_namespace(self.namespace)
        for obj in ioctx.list_objects():
            if obj.key.startswith("userconf-nfs"):
                return True
    return False
class ValidateExport:
    """Stateless validators for export fields; each raises
    NFSInvalidOperation (or FSNotFound) on bad input and returns None
    (fsal returns 0/1) on success."""

    @staticmethod
    def pseudo_path(path):
        """Pseudo path must be absolute and not the bare root."""
        if not isabs(path) or path == "/":
            raise NFSInvalidOperation(f"pseudo path {path} is invalid. It should be an absolute "
                                      "path and it cannot be just '/'.")

    @staticmethod
    def squash(squash):
        # NOTE: "all_anomnymous" is misspelled but kept verbatim — it is an
        # accepted on-disk value; changing it would break existing configs.
        valid_squash_ls = ["root", "root_squash", "rootsquash", "rootid", "root_id_squash",
                           "rootidsquash", "all", "all_squash", "allsquash", "all_anomnymous",
                           "allanonymous", "no_root_squash", "none", "noidsquash"]
        if squash not in valid_squash_ls:
            raise NFSInvalidOperation(f"squash {squash} not in valid list {valid_squash_ls}")

    @staticmethod
    def security_label(label):
        if not isinstance(label, bool):
            raise NFSInvalidOperation('Only boolean values allowed')

    @staticmethod
    def protocols(proto):
        # Only NFSv3 and NFSv4 exist; reconstructed loop (the original
        # iteration lines were lost in extraction) — confirm upstream.
        for p in proto:
            if p not in [3, 4]:
                raise NFSInvalidOperation(f"Invalid protocol {p}")
        if 3 in proto:
            log.warning("NFS V3 is an old version, it might not work")

    @staticmethod
    def transport(transport):
        valid_transport = ["UDP", "TCP"]
        for trans in transport:
            if trans.upper() not in valid_transport:
                raise NFSInvalidOperation(f'{trans} is not a valid transport protocol')

    @staticmethod
    def access_type(access_type):
        valid_ones = ['RW', 'RO']
        if access_type not in valid_ones:
            raise NFSInvalidOperation(f'{access_type} is invalid, valid access type are'
                                      f'{valid_ones}')

    @staticmethod
    def fsal(mgr, old, new):
        """Validate an FSAL block change; returns 1 when the fs_name
        changed (caller must update caps), else 0."""
        if old.name != new['name']:
            raise NFSInvalidOperation('FSAL name change not allowed')
        if old.user_id != new['user_id']:
            raise NFSInvalidOperation('User ID modification is not allowed')
        if new['sec_label_xattr']:
            raise NFSInvalidOperation('Security label xattr cannot be changed')
        if old.fs_name != new['fs_name']:
            if not check_fs(mgr, new['fs_name']):
                raise FSNotFound(new['fs_name'])
            return 1
        return 0

    @staticmethod
    def _client(client):
        # Per-client blocks carry their own access_type/squash overrides.
        ValidateExport.access_type(client['access_type'])
        ValidateExport.squash(client['squash'])

    @staticmethod
    def clients(clients_ls):
        for client in clients_ls:
            ValidateExport._client(client)
def __init__(self, mgr, namespace=None, export_ls=None):
    """Hold the mgr module handle, the NFS pool/namespace and an optional
    pre-parsed export list (None means lazy-load via the exports property).

    Fix: the visible code never stored ``mgr`` although every method in
    this class dereferences ``self.mgr``.
    """
    self.mgr = mgr
    self.rados_pool = POOL_NAME
    self.rados_namespace = namespace
    self._exports = export_ls
175 def _check_rados_notify(ioctx
, obj
):
179 log
.exception(f
"Ganesha timed out")
@property
def exports(self):
    """Mapping of cluster_id -> [Export]; parsed lazily from rados on
    first access (the property/def lines were lost in extraction —
    reconstructed, confirm against upstream)."""
    if self._exports is None:
        self._exports = {}
        log.info("Begin export parsing")
        for cluster_id in available_clusters(self.mgr):
            self.export_conf_objs = []  # type: List[Export]
            self._read_raw_config(cluster_id)
            self.exports[cluster_id] = self.export_conf_objs
            log.info(f"Exports parsed successfully {self.exports.items()}")
    return self._exports
193 def _fetch_export(self
, pseudo_path
):
195 for ex
in self
.exports
[self
.rados_namespace
]:
196 if ex
.pseudo
== pseudo_path
:
def _delete_user(self, entity):
    """Remove the ceph auth entity ``client.<entity>`` created for an
    export. The mon command prefix was lost in extraction; 'auth rm' is
    reconstructed — confirm against upstream."""
    self.mgr.check_mon_command({
        'prefix': 'auth rm',
        'entity': 'client.{}'.format(entity),
        })
    log.info(f"Export user deleted is {entity}")
208 def _gen_export_id(self
):
209 exports
= sorted([ex
.export_id
for ex
in self
.exports
[self
.rados_namespace
]])
def _read_raw_config(self, rados_namespace):
    """Parse every ``export-*`` object in *rados_namespace* into
    self.export_conf_objs.

    Fix: the visible code used ``size`` without defining it — the
    ``obj.stat()`` line was dropped by the extraction and is restored.
    """
    with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
        ioctx.set_namespace(rados_namespace)
        for obj in ioctx.list_objects():
            if obj.key.startswith("export-"):
                size, _ = obj.stat()
                raw_config = obj.read(size)
                raw_config = raw_config.decode("utf-8")
                log.debug("read export configuration from rados "
                          "object %s/%s/%s:\n%s", self.rados_pool,
                          rados_namespace, obj.key, raw_config)
                self.export_conf_objs.append(Export.from_export_block(
                    GaneshaConfParser(raw_config).parse()[0], rados_namespace))
def _save_export(self, export):
    # Cache the new export in memory, then persist it as export-<id> and
    # register its %url in the cluster's common conf-nfs.<cluster> object.
    self.exports[self.rados_namespace].append(export)
    NFSRados(self.mgr, self.rados_namespace).write_obj(export.to_export_block(),
             f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')
def _delete_export(self, cluster_id, pseudo_path, export_obj=None):
    """Delete one export, its rados objects and its cephx user.

    *export_obj* short-circuits the pseudo-path lookup (used by
    delete_all_exports, which passes pseudo_path=None). Returns the usual
    (retcode, out, err) tuple. The try/branch skeleton was lost in
    extraction and is reconstructed — confirm against upstream.
    """
    try:
        if export_obj:
            export = export_obj
        else:
            export = self._fetch_export(pseudo_path)

        if export:
            if pseudo_path:
                # Bulk deletion wipes the namespace wholesale, so only
                # per-export calls scrub the rados objects here.
                NFSRados(self.mgr, self.rados_namespace).remove_obj(
                        f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
            self.exports[cluster_id].remove(export)
            self._delete_user(export.fsal.user_id)
            if not self.exports[cluster_id]:
                del self.exports[cluster_id]
            return 0, "Successfully deleted export", ""
        return 0, "", "Export does not exist"
    except Exception as e:
        return exception_handler(e, f"Failed to delete {pseudo_path} export for {cluster_id}")
def _fetch_export_obj(self, ex_id):
    """Load ``export-<ex_id>`` straight from rados, bypassing the cache;
    returns the parsed Export, or None (logged) when the object is gone.

    Fix: restored the try/return lines dropped by the extraction.
    """
    try:
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(self.rados_namespace)
            export = Export.from_export_block(GaneshaConfParser(ioctx.read(f"export-{ex_id}"
                ).decode("utf-8")).parse()[0], self.rados_namespace)
            return export
    except ObjectNotFound:
        log.exception(f"Export ID: {ex_id} not found")
def _update_export(self, export):
    # Add the refreshed export to the cache and overwrite its rados object
    # in place (update_obj notifies ganesha; no new %url is appended).
    # The caller is responsible for evicting the stale cached entry.
    self.exports[self.rados_namespace].append(export)
    NFSRados(self.mgr, self.rados_namespace).update_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')
def format_path(self, path):
    """Normalize *path*: strip whitespace and collapse '.', '..' and
    duplicate separators; falsy paths are returned unchanged.

    Fix: restored the guard and return dropped by the extraction, and the
    leading '//' reduction (POSIX normpath may preserve a double slash).
    """
    if path:
        path = normpath(path.strip())
        if path[:2] == "//":
            path = path[1:]
    return path
@export_cluster_checker
def create_export(self, **kwargs):
    """Dispatch export creation by fsal_type; only 'cephfs' is
    implemented. Returns (retcode, out, err).

    Fix: restored the ``try:`` dropped by the extraction so the except
    clause has a body to guard.
    """
    try:
        fsal_type = kwargs.pop('fsal_type')
        if fsal_type == 'cephfs':
            return FSExport(self).create_export(**kwargs)
        # Other FSALs (rgw etc.) are not supported yet.
        raise NotImplementedError()
    except Exception as e:
        return exception_handler(e, f"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")
@export_cluster_checker
def delete_export(self, cluster_id, pseudo_path):
    # Thin public wrapper: the decorator validates cluster_id and sets
    # rados_namespace; _delete_export does the work and error handling.
    return self._delete_export(cluster_id, pseudo_path)
def delete_all_exports(self, cluster_id):
    """Delete every export of *cluster_id*; raises NFSException if any
    single deletion fails. Used during cluster teardown.

    Fix: restored the try/except KeyError skeleton and the
    ``export_obj=export`` call continuation dropped by the extraction.
    """
    try:
        export_list = list(self.exports[cluster_id])
    except KeyError:
        log.info("No exports to delete")
        return
    self.rados_namespace = cluster_id
    for export in export_list:
        ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                            export_obj=export)
        if ret != 0:
            raise NFSException(-1, f"Failed to delete exports: {err} and {ret}")
    log.info(f"All exports successfully deleted for cluster id: {cluster_id}")
@export_cluster_checker
def list_exports(self, cluster_id, detailed):
    """List exports of *cluster_id* as JSON: full dicts when *detailed*,
    otherwise just the pseudo paths.

    Fix: restored the try / detailed-else / except-KeyError skeleton
    dropped by the extraction.
    """
    try:
        if detailed:
            result = [export.to_dict() for export in self.exports[cluster_id]]
        else:
            result = [export.pseudo for export in self.exports[cluster_id]]
        return 0, json.dumps(result, indent=2), ''
    except KeyError:
        # No exports cached for this cluster: empty success, not an error.
        log.warning(f"No exports to list for {cluster_id}")
        return 0, '', ''
    except Exception as e:
        return exception_handler(e, f"Failed to list exports for {cluster_id}")
@export_cluster_checker
def get_export(self, cluster_id, pseudo_path):
    """Show one export of *cluster_id* (matched by pseudo path) as JSON;
    empty output when it does not exist.

    Fix: restored the try / if-export skeleton dropped by the extraction.
    """
    try:
        export = self._fetch_export(pseudo_path)
        if export:
            return 0, json.dumps(export.to_dict(), indent=2), ''
        log.warning(f"No {pseudo_path} export to show for {cluster_id}")
        return 0, '', ''
    except Exception as e:
        return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")
def update_export(self, export_config):
    """Update an existing export from a JSON document string.

    NotImplementedError from the FSExport layer is reported as a
    manual-restart advisory rather than a failure.
    Fix: restored the ``try:`` dropped by the extraction.
    """
    try:
        if not export_config:
            raise NFSInvalidOperation("Empty Config!!")
        new_export = json.loads(export_config)
        # Only the cephfs FSAL exists today, so dispatch is unconditional.
        return FSExport(self).update_export(new_export)
    except NotImplementedError:
        return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
    except Exception as e:
        return exception_handler(e, f'Failed to update export: {e}')
class FSExport(ExportMgr):
    # CephFS-specific export operations; shares the parent's cached state
    # by forwarding the live mgr handle, namespace and export list.
    def __init__(self, export_mgr_obj):
        super().__init__(export_mgr_obj.mgr, export_mgr_obj.rados_namespace,
                         export_mgr_obj._exports)
def _validate_export(self, new_export_dict):
    """Validate an update request against the existing export.

    Returns (existing_export, out_msg) where out_msg == 'update caps'
    when the cephx user caps must be refreshed. Raises ClusterNotFound /
    NFSInvalidOperation / NFSObjectNotFound / FSNotFound on bad input.
    The branch skeleton was lost in extraction and is reconstructed —
    confirm against upstream.
    """
    if new_export_dict['cluster_id'] not in available_clusters(self.mgr):
        raise ClusterNotFound()

    export = self._fetch_export(new_export_dict['pseudo'])
    out_msg = ''
    if export:
        # Check if export id matches
        if export.export_id != new_export_dict['export_id']:
            raise NFSInvalidOperation('Export ID changed, Cannot update export')
    else:
        # Fetch export based on export id object
        export = self._fetch_export_obj(new_export_dict['export_id'])
        if not export:
            raise NFSObjectNotFound('Export does not exist')
        new_export_dict['pseudo'] = self.format_path(new_export_dict['pseudo'])
        ValidateExport.pseudo_path(new_export_dict['pseudo'])
        log.debug(f"Pseudo path has changed from {export.pseudo} to "
                  f"{new_export_dict['pseudo']}")
    # Check if squash changed
    if export.squash != new_export_dict['squash']:
        if new_export_dict['squash']:
            new_export_dict['squash'] = new_export_dict['squash'].lower()
        ValidateExport.squash(new_export_dict['squash'])
        log.debug(f"squash has changed from {export.squash} to {new_export_dict['squash']}")
    # Security label check
    if export.security_label != new_export_dict['security_label']:
        ValidateExport.security_label(new_export_dict['security_label'])
    if export.protocols != new_export_dict['protocols']:
        ValidateExport.protocols(new_export_dict['protocols'])
    if export.transports != new_export_dict['transports']:
        ValidateExport.transport(new_export_dict['transports'])
    if export.path != new_export_dict['path']:
        new_export_dict['path'] = self.format_path(new_export_dict['path'])
        # Path change means the mds caps no longer match.
        out_msg = 'update caps'
    if export.access_type != new_export_dict['access_type']:
        ValidateExport.access_type(new_export_dict['access_type'])
    if export.fsal != new_export_dict['fsal']:
        ret = ValidateExport.fsal(self.mgr, export.fsal, new_export_dict['fsal'])
        if ret == 1 and not out_msg:
            # fs_name changed: caps reference the fs, refresh them too.
            out_msg = 'update caps'
    if export.clients != new_export_dict['clients']:
        ValidateExport.clients(new_export_dict['clients'])
    log.debug(f'Validation succeeded for Export {export.pseudo}')
    return export, out_msg
def _update_user_id(self, path, access_type, fs_name, user_id):
    """Re-issue cephx caps for *user_id* after a path/fs/access change.

    osd caps stay rw regardless of export access type (config objects
    must remain writable); only the mds cap honours RO.
    Fix: restored the caps-list continuation dropped by the extraction.
    """
    osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, self.rados_namespace, fs_name)
    access_type = 'r' if access_type == 'RO' else 'rw'

    self.mgr.check_mon_command({
        'prefix': 'auth caps',
        'entity': f'client.{user_id}',
        'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
            access_type, path)],
        })

    log.info(f"Export user updated {user_id}")
def _create_user_key(self, entity, path, fs_name, fs_ro):
    """Create (or fetch) cephx user ``client.<entity>`` scoped to the
    export path and return its (entity, key) pair.

    Fix: restored the caps continuation and json 'format' argument
    dropped by the extraction — confirm against upstream.
    """
    osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, self.rados_namespace, fs_name)
    access_type = 'r' if fs_ro else 'rw'

    ret, out, err = self.mgr.check_mon_command({
        'prefix': 'auth get-or-create',
        'entity': 'client.{}'.format(entity),
        'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
            access_type, path)],
        'format': 'json',
        })

    json_res = json.loads(out)
    log.info("Export user created is {}".format(json_res[0]['entity']))
    return json_res[0]['entity'], json_res[0]['key']
def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path):
    """Create a cephfs export: validates fs and pseudo path, allocates an
    export id and cephx user, persists the export and returns a JSON
    summary. Returns an 'already exists' error string when the pseudo
    path is taken.

    The access_type derivation and the ex_dict/result literals were lost
    in extraction and are reconstructed — confirm against upstream.
    """
    if not check_fs(self.mgr, fs_name):
        raise FSNotFound(fs_name)

    pseudo_path = self.format_path(pseudo_path)
    ValidateExport.pseudo_path(pseudo_path)

    if cluster_id not in self.exports:
        self.exports[cluster_id] = []

    if not self._fetch_export(pseudo_path):
        ex_id = self._gen_export_id()
        user_id = f"{cluster_id}{ex_id}"
        user_out, key = self._create_user_key(user_id, path, fs_name, read_only)
        access_type = "RO" if read_only else "RW"
        ex_dict = {
                'path': self.format_path(path),
                'pseudo': pseudo_path,
                'cluster_id': cluster_id,
                'access_type': access_type,
                'fsal': {"name": "CEPH", "user_id": user_id,
                         "fs_name": fs_name, "sec_label_xattr": ""},
                'clients': [],
                }
        export = Export.from_dict(ex_id, ex_dict)
        export.fsal.cephx_key = key
        self._save_export(export)
        result = {
                "bind": pseudo_path,
                "fs": fs_name,
                "path": path,
                "cluster": cluster_id,
                "mode": access_type,
                }
        return (0, json.dumps(result, indent=4), '')
    return 0, "", "Export already exists"
def update_export(self, new_export):
    """Apply a validated update dict to an existing export: refresh cephx
    caps when needed, persist the new export object, evict the stale
    cached entry and restart the nfs service.

    Fix: restored the ``if update_user_caps:`` guard dropped by the
    extraction (it matches the 'update caps' flag _validate_export
    returns); without it caps would be rewritten on every update.
    """
    old_export, update_user_caps = self._validate_export(new_export)
    if update_user_caps:
        self._update_user_id(new_export['path'], new_export['access_type'],
                             new_export['fsal']['fs_name'], new_export['fsal']['user_id'])
    new_export = Export.from_dict(new_export['export_id'], new_export)
    new_export.fsal.cephx_key = old_export.fsal.cephx_key
    self._update_export(new_export)
    export_ls = self.exports[self.rados_namespace]
    if old_export not in export_ls:
        # This happens when export is fetched by ID
        old_export = self._fetch_export(old_export.pseudo)
    export_ls.remove(old_export)
    restart_nfs_service(self.mgr, new_export.cluster_id)
    return 0, "Successfully updated export", ""