import errno
import json
import logging
from typing import List, Any, Dict, Tuple, Optional, TYPE_CHECKING, TypeVar, Callable, cast
from os.path import normpath

from rados import TimedOut, ObjectNotFound

from mgr_module import NFS_POOL_NAME as POOL_NAME, NFS_GANESHA_SUPPORTED_FSALS

from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL
from .exception import NFSException, NFSInvalidOperation, FSNotFound, \
    ClusterNotFound
from .utils import available_clusters, check_fs, restart_nfs_service

if TYPE_CHECKING:
    from nfs.module import Module

FuncT = TypeVar('FuncT', bound=Callable)

log = logging.getLogger(__name__)

def export_cluster_checker(func: FuncT) -> FuncT:
    def cluster_check(
            export: 'ExportMgr',
            *args: Any,
            **kwargs: Any
    ) -> Tuple[int, str, str]:
        """
        This method checks if cluster exists
        """
        if kwargs['cluster_id'] not in available_clusters(export.mgr):
            return -errno.ENOENT, "", "Cluster does not exist"
        return func(export, *args, **kwargs)
    return cast(FuncT, cluster_check)
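
# Usage note: this decorator wraps ExportMgr methods that receive 'cluster_id' as a
# keyword argument (e.g. create_export, delete_export, list_exports, get_export below).
# If the named cluster is unknown, the wrapped call is short-circuited with -errno.ENOENT
# instead of invoking the method.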

def exception_handler(
        exception_obj: Exception,
        log_msg: str = ""
) -> Tuple[int, str, str]:
    if log_msg:
        log.exception(log_msg)
    return getattr(exception_obj, 'errno', -1), "", str(exception_obj)

class NFSRados:
    def __init__(self, mgr: 'Module', namespace: str) -> None:
        self.mgr = mgr
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj: str) -> str:
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name: str) -> RawBlock:
        return RawBlock('%url', values={'value': self._make_rados_url(obj_name)})
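
    # For illustration: an export object named 'export-1' in namespace 'mycluster' maps to
    # the URL 'rados://<pool>/mycluster/export-1' (with <pool> being NFS_POOL_NAME), and
    # _create_url_block() wraps it in a ganesha '%url' directive so the daemon pulls that
    # export configuration from RADOS.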

    def write_obj(self, conf_block: str, obj: str, config_obj: str = '') -> None:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                self._create_url_block(obj)).encode('utf-8'))
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Added %s url to %s", obj, config_obj)

    def read_obj(self, obj: str) -> Optional[str]:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            try:
                return ioctx.read(obj, 1048576).decode()
            except ObjectNotFound:
                return None

    def update_obj(self, conf_block: str, obj: str, config_obj: str) -> None:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Update export %s in %s", obj, config_obj)

    def remove_obj(self, obj: str, config_obj: str) -> None:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: %s", url)

    def remove_all_obj(self) -> None:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self) -> bool:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("userconf-nfs"):
                    return True
        return False

class ExportMgr:
    def __init__(
            self,
            mgr: 'Module',
            export_ls: Optional[Dict[str, List[Export]]] = None
    ) -> None:
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self._exports: Optional[Dict[str, List[Export]]] = export_ls

    @staticmethod
    def _check_rados_notify(ioctx: Any, obj: str) -> None:
        try:
            ioctx.notify(obj)
        except TimedOut:
            log.exception("Ganesha timed out")

    @property
    def exports(self) -> Dict[str, List[Export]]:
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in available_clusters(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self.exports[cluster_id] = self.export_conf_objs
                log.info("Exports parsed successfully %s", self.exports.items())
        return self._exports
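
    # The property above lazily builds an in-memory {cluster_id: [Export, ...]} cache the
    # first time it is read, by parsing every 'export-*' object found in each cluster's
    # RADOS namespace; subsequent accesses reuse the cached dict.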

    def _fetch_export(
            self,
            cluster_id: str,
            pseudo_path: str
    ) -> Optional[Export]:
        try:
            for ex in self.exports[cluster_id]:
                if ex.pseudo == pseudo_path:
                    return ex
            return None
        except KeyError:
            log.info('no exports for cluster %s', cluster_id)
            return None

    def _fetch_export_id(
            self,
            cluster_id: str,
            export_id: int
    ) -> Optional[Export]:
        try:
            for ex in self.exports[cluster_id]:
                if ex.export_id == export_id:
                    return ex
            return None
        except KeyError:
            log.info(f'no exports for cluster {cluster_id}')
            return None

    def _delete_export_user(self, export: Export) -> None:
        if isinstance(export.fsal, CephFSFSAL):
            assert export.fsal.user_id
            self.mgr.check_mon_command({
                'prefix': 'auth rm',
                'entity': 'client.{}'.format(export.fsal.user_id),
            })
            log.info("Deleted export user %s", export.fsal.user_id)
        elif isinstance(export.fsal, RGWFSAL):
            # do nothing; we're using the bucket owner creds.
            pass

    def _create_export_user(self, export: Export) -> None:
        if isinstance(export.fsal, CephFSFSAL):
            fsal = cast(CephFSFSAL, export.fsal)

            # is top-level or any client rw?
            rw = export.access_type.lower() == 'rw'
            for c in export.clients:
                if c.access_type.lower() == 'rw':
                    rw = True
                    break

            fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
            fsal.cephx_key = self._create_user_key(
                export.cluster_id, fsal.user_id, export.path, fsal.fs_name, not rw
            )
            log.debug("Successfully created user %s for cephfs path %s",
                      fsal.user_id, export.path)

        elif isinstance(export.fsal, RGWFSAL):
            rgwfsal = cast(RGWFSAL, export.fsal)
            if not rgwfsal.user_id:
                # look up the bucket owner so their credentials can be reused
                ret, out, err = self.mgr.tool_exec(
                    ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
                )
                if ret:
                    raise NFSException(f'Failed to fetch owner for bucket {export.path}')
                j = json.loads(out)
                owner = j.get('owner', '')
                rgwfsal.user_id = owner
            assert rgwfsal.user_id
            ret, out, err = self.mgr.tool_exec([
                'radosgw-admin', 'user', 'info', '--uid', rgwfsal.user_id
            ])
            if ret:
                raise NFSException(
                    f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
                )
            j = json.loads(out)

            # FIXME: make this more tolerant of unexpected output?
            rgwfsal.access_key_id = j['keys'][0]['access_key']
            rgwfsal.secret_access_key = j['keys'][0]['secret_key']
            log.debug("Successfully fetched user %s for RGW path %s",
                      rgwfsal.user_id, export.path)

    def _gen_export_id(self, cluster_id: str) -> int:
        exports = sorted([ex.export_id for ex in self.exports[cluster_id]])
        # pick the lowest export id not already in use
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid
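
    # Example (assuming the lowest-free-id allocation sketched above): with existing export
    # ids [1, 2, 4] the next export gets id 3; with no exports the first one gets id 1.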

    def _read_raw_config(self, rados_namespace: str) -> None:
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("export-"):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s", self.rados_pool,
                              rados_namespace, obj.key)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))

    def _save_export(self, cluster_id: str, export: Export) -> None:
        self.exports[cluster_id].append(export)
        NFSRados(self.mgr, cluster_id).write_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            f'export-{export.export_id}',
            f'conf-nfs.{export.cluster_id}'
        )

    def _delete_export(
            self,
            cluster_id: str,
            pseudo_path: Optional[str],
            export_obj: Optional[Export] = None
    ) -> Tuple[int, str, str]:
        try:
            if export_obj:
                export: Optional[Export] = export_obj
            else:
                export = self._fetch_export(cluster_id, pseudo_path)

            if export:
                if pseudo_path:
                    NFSRados(self.mgr, cluster_id).remove_obj(
                        f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
                self.exports[cluster_id].remove(export)
                self._delete_export_user(export)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                    log.debug("Deleted all exports for cluster %s", cluster_id)
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            return exception_handler(e, f"Failed to delete {pseudo_path} export for {cluster_id}")

    def _fetch_export_obj(self, cluster_id: str, ex_id: int) -> Optional[Export]:
        try:
            with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                ioctx.set_namespace(cluster_id)
                export = Export.from_export_block(
                    GaneshaConfParser(
                        ioctx.read(f"export-{ex_id}").decode("utf-8")
                    ).parse()[0],
                    cluster_id
                )
                return export
        except ObjectNotFound:
            log.exception("Export ID: %s not found", ex_id)
        return None

    def _update_export(self, cluster_id: str, export: Export) -> None:
        self.exports[cluster_id].append(export)
        NFSRados(self.mgr, cluster_id).update_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')

    def format_path(self, path: str) -> str:
        if path:
            path = normpath(path.strip())
            if path[:2] == "//":
                # normpath() keeps a leading double slash (POSIX allows it); collapse it here
                path = path[1:]
        return path
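
    # For illustration: format_path(" /volumes//grp/sub/ ") returns "/volumes/grp/sub", and
    # format_path("//a/b") returns "/a/b" thanks to the double-slash handling above.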

    @export_cluster_checker
    def create_export(self, addr: Optional[List[str]] = None, **kwargs: Any) -> Tuple[int, str, str]:
        # if addr(s) are provided, construct client list and adjust outer block
        clients = []
        if addr:
            clients = [{
                'addresses': addr,
                'access_type': 'ro' if kwargs['read_only'] else 'rw',
                'squash': kwargs['squash'],
            }]
            kwargs['squash'] = 'none'
        kwargs['clients'] = clients

        if clients:
            kwargs['access_type'] = "none"
        elif kwargs['read_only']:
            kwargs['access_type'] = "RO"
        else:
            kwargs['access_type'] = "RW"

        if kwargs['cluster_id'] not in self.exports:
            self.exports[kwargs['cluster_id']] = []

        try:
            fsal_type = kwargs.pop('fsal_type')
            if fsal_type == 'cephfs':
                return self.create_cephfs_export(**kwargs)
            if fsal_type == 'rgw':
                return self.create_rgw_export(**kwargs)
            raise NotImplementedError()
        except Exception as e:
            return exception_handler(
                e, f"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")

    @export_cluster_checker
    def delete_export(self,
                      cluster_id: str,
                      pseudo_path: str) -> Tuple[int, str, str]:
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id: str) -> None:
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise NFSException(f"Failed to delete exports: {err} and {ret}")
        log.info("All exports successfully deleted for cluster id: %s", cluster_id)

    def list_all_exports(self) -> List[Dict[str, Any]]:
        r = []
        for cluster_id, ls in self.exports.items():
            r.extend([e.to_dict() for e in ls])
        return r

    @export_cluster_checker
    def list_exports(self,
                     cluster_id: str,
                     detailed: bool = False) -> Tuple[int, str, str]:
        try:
            if detailed:
                result_d = [export.to_dict() for export in self.exports[cluster_id]]
                return 0, json.dumps(result_d, indent=2), ''
            else:
                result_ps = [export.pseudo for export in self.exports[cluster_id]]
                return 0, json.dumps(result_ps, indent=2), ''
        except KeyError:
            log.warning("No exports to list for %s", cluster_id)
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to list exports for {cluster_id}")

    def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        if export:
            return export.to_dict()
        log.warning(f"No {pseudo_path} export to show for {cluster_id}")
        return None

    @export_cluster_checker
    def get_export(
            self,
            cluster_id: str,
            pseudo_path: str,
    ) -> Tuple[int, str, str]:
        try:
            export_dict = self._get_export_dict(cluster_id, pseudo_path)
            if export_dict:
                return 0, json.dumps(export_dict, indent=2), ''
            log.warning("No %s export to show for %s", pseudo_path, cluster_id)
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")

    def get_export_by_id(
            self,
            cluster_id: str,
            export_id: int
    ) -> Optional[Dict[str, Any]]:
        export = self._fetch_export_id(cluster_id, export_id)
        return export.to_dict() if export else None

    def get_export_by_pseudo(
            self,
            cluster_id: str,
            pseudo_path: str
    ) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        return export.to_dict() if export else None

    def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
        try:
            if not export_config:
                raise NFSInvalidOperation("Empty Config!!")
            try:
                j = json.loads(export_config)
            except ValueError:
                # okay, not JSON. is it an EXPORT block?
                try:
                    blocks = GaneshaConfParser(export_config).parse()
                    exports = [
                        Export.from_export_block(block, cluster_id)
                        for block in blocks
                    ]
                    j = [export.to_dict() for export in exports]
                except Exception as ex:
                    raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")

            # a list means multiple exports were supplied; apply each one in turn
            if isinstance(j, list):
                ret, out, err = (0, '', '')
                for export in j:
                    try:
                        r, o, e = self._apply_export(cluster_id, export)
                    except Exception as ex:
                        r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
                    if r:
                        ret = r
                    if o:
                        out += o + '\n'
                    if e:
                        err += e + '\n'
                return ret, out, err
            else:
                r, o, e = self._apply_export(cluster_id, j)
                return r, o, e
        except NotImplementedError:
            return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
        except Exception as e:
            return exception_handler(e, f'Failed to update export: {e}')
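
    # apply_export() accepts either a ganesha EXPORT block or JSON. A minimal JSON example
    # (field names taken from create_export_from_dict below; values are illustrative only):
    #
    #   {
    #     "pseudo": "/cephfs1",
    #     "path": "/",
    #     "access_type": "RW",
    #     "squash": "none",
    #     "fsal": {"name": "CEPH", "fs_name": "a"},
    #     "clients": []
    #   }
    #
    # A JSON list of such objects applies each export in turn.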

    def _update_user_id(
            self,
            cluster_id: str,
            path: str,
            access_type: str,
            fs_name: str,
            user_id: str
    ) -> None:
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        access_type = 'r' if access_type == 'RO' else 'rw'

        self.mgr.check_mon_command({
            'prefix': 'auth caps',
            'entity': f'client.{user_id}',
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
        })

        log.info("Export user updated %s", user_id)

    def _create_user_key(
            self,
            cluster_id: str,
            entity: str,
            path: str,
            fs_name: str,
            fs_ro: bool
    ) -> str:
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        access_type = 'r' if fs_ro else 'rw'
        nfs_caps = [
            'mon', 'allow r',
            'osd', osd_cap,
            'mds', 'allow {} path={}'.format(access_type, path)
        ]

        ret, out, err = self.mgr.mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': nfs_caps,
            'format': 'json',
        })
        if ret == -errno.EINVAL and 'does not match' in err:
            # the user already exists with different caps; update them and re-fetch the key
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth caps',
                'entity': 'client.{}'.format(entity),
                'caps': nfs_caps,
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to update caps for {entity}: {err}')
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth get',
                'entity': 'client.{}'.format(entity),
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to fetch caps for {entity}: {err}')

        json_res = json.loads(out)
        log.info("Export user created is %s", json_res[0]['entity'])
        return json_res[0]['key']
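
    # For reference, the resulting cephx user is granted caps of the form:
    #   mon 'allow r'
    #   osd 'allow rw pool=<rados_pool> namespace=<cluster_id>, allow rw tag cephfs data=<fs_name>'
    #   mds 'allow rw path=<export path>'   ('allow r ...' when the export is read-only)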

    def create_export_from_dict(self,
                                cluster_id: str,
                                ex_id: int,
                                ex_dict: Dict[str, Any]) -> Export:
        pseudo_path = ex_dict.get("pseudo")
        if not pseudo_path:
            raise NFSInvalidOperation("export must specify pseudo path")

        path = ex_dict.get("path")
        if path is None:
            raise NFSInvalidOperation("export must specify path")
        path = self.format_path(path)

        fsal = ex_dict.get("fsal", {})
        fsal_type = fsal.get("name")
        if fsal_type == NFS_GANESHA_SUPPORTED_FSALS[1]:
            if '/' in path and path != '/':
                raise NFSInvalidOperation('"/" is not allowed in path with bucket name')
        elif fsal_type == NFS_GANESHA_SUPPORTED_FSALS[0]:
            fs_name = fsal.get("fs_name")
            if not fs_name:
                raise NFSInvalidOperation("export FSAL must specify fs_name")
            if not check_fs(self.mgr, fs_name):
                raise FSNotFound(fs_name)

            user_id = f"nfs.{cluster_id}.{ex_id}"
            if "user_id" in fsal and fsal["user_id"] != user_id:
                raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'")
        else:
            raise NFSInvalidOperation(f"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}. "
                                      "Export must specify one of them.")

        ex_dict["fsal"] = fsal
        ex_dict["cluster_id"] = cluster_id
        export = Export.from_dict(ex_id, ex_dict)
        export.validate(self.mgr)
        log.debug("Successfully created %s export-%s from dict for cluster %s",
                  fsal_type, ex_id, cluster_id)
        return export

    def create_cephfs_export(self,
                             fs_name: str,
                             cluster_id: str,
                             pseudo_path: str,
                             read_only: bool,
                             path: str,
                             squash: str,
                             access_type: str,
                             clients: list = []) -> Tuple[int, str, str]:
        pseudo_path = self.format_path(pseudo_path)

        if not self._fetch_export(cluster_id, pseudo_path):
            export = self.create_export_from_dict(
                cluster_id,
                self._gen_export_id(cluster_id),
                {
                    "pseudo": pseudo_path,
                    "path": path,
                    "access_type": access_type,
                    "squash": squash,
                    "fsal": {
                        "name": NFS_GANESHA_SUPPORTED_FSALS[0],
                        "fs_name": fs_name,
                    },
                    "clients": clients,
                }
            )
            log.debug("creating cephfs export %s", export)
            self._create_export_user(export)
            self._save_export(cluster_id, export)
            result = {
                "bind": export.pseudo,
                "fs": fs_name,
                "path": export.path,
                "cluster": cluster_id,
                "mode": export.access_type,
            }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"
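
    # On success this returns a small JSON summary, roughly (keys as built above, values
    # illustrative): {"bind": "/cephfs1", "fs": "a", "path": "/", "cluster": "mynfs", "mode": "RW"}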

    def create_rgw_export(self,
                          cluster_id: str,
                          pseudo_path: str,
                          access_type: str,
                          read_only: bool,
                          squash: str,
                          bucket: Optional[str] = None,
                          user_id: Optional[str] = None,
                          clients: list = []) -> Tuple[int, str, str]:
        pseudo_path = self.format_path(pseudo_path)

        if not bucket and not user_id:
            return -errno.EINVAL, "", "Must specify either bucket or user_id"

        if not self._fetch_export(cluster_id, pseudo_path):
            export = self.create_export_from_dict(
                cluster_id,
                self._gen_export_id(cluster_id),
                {
                    "pseudo": pseudo_path,
                    "path": bucket or '/',
                    "access_type": access_type,
                    "squash": squash,
                    "fsal": {
                        "name": NFS_GANESHA_SUPPORTED_FSALS[1],
                        "user_id": user_id,
                    },
                    "clients": clients,
                }
            )
            log.debug("creating rgw export %s", export)
            self._create_export_user(export)
            self._save_export(cluster_id, export)
            result = {
                "bind": export.pseudo,
                "path": export.path,
                "cluster": cluster_id,
                "mode": export.access_type,
                "squash": export.squash,
            }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"

    def _apply_export(
            self,
            cluster_id: str,
            new_export_dict: Dict,
    ) -> Tuple[int, str, str]:
        for k in ['path', 'pseudo']:
            if k not in new_export_dict:
                raise NFSInvalidOperation(f'Export missing required field {k}')
        if cluster_id not in available_clusters(self.mgr):
            raise ClusterNotFound()
        if cluster_id not in self.exports:
            self.exports[cluster_id] = []

        new_export_dict['path'] = self.format_path(new_export_dict['path'])
        new_export_dict['pseudo'] = self.format_path(new_export_dict['pseudo'])

        old_export = self._fetch_export(cluster_id, new_export_dict['pseudo'])
        if old_export:
            # Check if export id matches
            if new_export_dict.get('export_id'):
                if old_export.export_id != new_export_dict.get('export_id'):
                    raise NFSInvalidOperation('Export ID changed, Cannot update export')
            else:
                new_export_dict['export_id'] = old_export.export_id
        elif new_export_dict.get('export_id'):
            old_export = self._fetch_export_obj(cluster_id, new_export_dict['export_id'])
            if old_export:
                # re-fetch via old pseudo
                old_export = self._fetch_export(cluster_id, old_export.pseudo)
                log.debug("export %s pseudo %s -> %s",
                          old_export.export_id, old_export.pseudo, new_export_dict['pseudo'])

        new_export = self.create_export_from_dict(
            cluster_id,
            new_export_dict.get('export_id', self._gen_export_id(cluster_id)),
            new_export_dict
        )

        if not old_export:
            self._create_export_user(new_export)
            self._save_export(cluster_id, new_export)
            return 0, f'Added export {new_export.pseudo}', ''

        if old_export.fsal.name != new_export.fsal.name:
            raise NFSInvalidOperation('FSAL change not allowed')
        if old_export.pseudo != new_export.pseudo:
            log.debug('export %s pseudo %s -> %s',
                      new_export.export_id, old_export.pseudo, new_export.pseudo)

        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            old_fsal = cast(CephFSFSAL, old_export.fsal)
            new_fsal = cast(CephFSFSAL, new_export.fsal)
            if old_fsal.user_id != new_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif (
                old_export.path != new_export.path
                or old_fsal.fs_name != new_fsal.fs_name
            ):
                self._update_user_id(
                    cluster_id,
                    new_export.path,
                    new_export.access_type,
                    cast(str, new_fsal.fs_name),
                    cast(str, new_fsal.user_id)
                )
                new_fsal.cephx_key = old_fsal.cephx_key
            else:
                new_fsal.cephx_key = old_fsal.cephx_key
        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
            new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
            if old_rgw_fsal.user_id != new_rgw_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif old_rgw_fsal.access_key_id != new_rgw_fsal.access_key_id:
                raise NFSInvalidOperation('access_key_id change is not allowed')
            elif old_rgw_fsal.secret_access_key != new_rgw_fsal.secret_access_key:
                raise NFSInvalidOperation('secret_access_key change is not allowed')

        self.exports[cluster_id].remove(old_export)
        self._update_export(cluster_id, new_export)

        # TODO: detect whether the update is such that a reload is sufficient
        restart_nfs_service(self.mgr, new_export.cluster_id)

        return 0, f"Updated export {new_export.pseudo}", ""