import errno
import json
import logging
from os.path import normpath
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Set,
    TypeVar,
    cast,
)

import cephfs
from rados import TimedOut, ObjectNotFound, Rados, LIBRADOS_ALL_NSPACES

from object_format import ErrorResponse
from orchestrator import NoOrchestrator
from mgr_module import NFS_POOL_NAME as POOL_NAME, NFS_GANESHA_SUPPORTED_FSALS

from .ganesha_conf import (
    CephFSFSAL,
    Export,
    GaneshaConfParser,
    RGWFSAL,
    RawBlock,
    format_block,
)
from .exception import (
    NFSException,
    NFSInvalidOperation,
    FSNotFound,
    NFSObjectNotFound,
    NonFatalError,
)
from .utils import (
    CONF_PREFIX,
    EXPORT_PREFIX,
    USER_CONF_PREFIX,
    available_clusters,
    check_fs,
    conf_obj_name,
    export_obj_name,
    restart_nfs_service,
    cephfs_path_is_dir,
)

if TYPE_CHECKING:
    from nfs.module import Module
45 FuncT
= TypeVar('FuncT', bound
=Callable
)
47 log
= logging
.getLogger(__name__
)
50 def known_cluster_ids(mgr
: 'Module') -> Set
[str]:
51 """Return the set of known cluster IDs."""
53 clusters
= set(available_clusters(mgr
))
54 except NoOrchestrator
:
55 clusters
= nfs_rados_configs(mgr
.rados
)
59 def _check_rados_notify(ioctx
: Any
, obj
: str) -> None:
63 log
.exception("Ganesha timed out")
66 def normalize_path(path
: str) -> str:
68 path
= normpath(path
.strip())
75 def __init__(self
, rados
: 'Rados', namespace
: str) -> None:
78 self
.namespace
= namespace
80 def _make_rados_url(self
, obj
: str) -> str:
81 return "rados://{}/{}/{}".format(self
.pool
, self
.namespace
, obj
)
83 def _create_url_block(self
, obj_name
: str) -> RawBlock
:
84 return RawBlock('%url', values
={'value': self
._make
_rados
_url
(obj_name
)})
86 def write_obj(self
, conf_block
: str, obj
: str, config_obj
: str = '') -> None:
87 with self
.rados
.open_ioctx(self
.pool
) as ioctx
:
88 ioctx
.set_namespace(self
.namespace
)
89 ioctx
.write_full(obj
, conf_block
.encode('utf-8'))
91 # Return after creating empty common config object
93 log
.debug("write configuration into rados object %s/%s/%s",
94 self
.pool
, self
.namespace
, obj
)
96 # Add created obj url to common config obj
97 ioctx
.append(config_obj
, format_block(
98 self
._create
_url
_block
(obj
)).encode('utf-8'))
99 _check_rados_notify(ioctx
, config_obj
)
100 log
.debug("Added %s url to %s", obj
, config_obj
)
102 def read_obj(self
, obj
: str) -> Optional
[str]:
103 with self
.rados
.open_ioctx(self
.pool
) as ioctx
:
104 ioctx
.set_namespace(self
.namespace
)
106 return ioctx
.read(obj
, 1048576).decode()
107 except ObjectNotFound
:
110 def update_obj(self
, conf_block
: str, obj
: str, config_obj
: str,
111 should_notify
: Optional
[bool] = True) -> None:
112 with self
.rados
.open_ioctx(self
.pool
) as ioctx
:
113 ioctx
.set_namespace(self
.namespace
)
114 ioctx
.write_full(obj
, conf_block
.encode('utf-8'))
115 log
.debug("write configuration into rados object %s/%s/%s",
116 self
.pool
, self
.namespace
, obj
)
118 _check_rados_notify(ioctx
, config_obj
)
119 log
.debug("Update export %s in %s", obj
, config_obj
)
121 def remove_obj(self
, obj
: str, config_obj
: str) -> None:
122 with self
.rados
.open_ioctx(self
.pool
) as ioctx
:
123 ioctx
.set_namespace(self
.namespace
)
124 export_urls
= ioctx
.read(config_obj
)
125 url
= '%url "{}"\n\n'.format(self
._make
_rados
_url
(obj
))
126 export_urls
= export_urls
.replace(url
.encode('utf-8'), b
'')
127 ioctx
.remove_object(obj
)
128 ioctx
.write_full(config_obj
, export_urls
)
129 _check_rados_notify(ioctx
, config_obj
)
130 log
.debug("Object deleted: %s", url
)
132 def remove_all_obj(self
) -> None:
133 with self
.rados
.open_ioctx(self
.pool
) as ioctx
:
134 ioctx
.set_namespace(self
.namespace
)
135 for obj
in ioctx
.list_objects():
138 def check_user_config(self
) -> bool:
139 with self
.rados
.open_ioctx(self
.pool
) as ioctx
:
140 ioctx
.set_namespace(self
.namespace
)
141 for obj
in ioctx
.list_objects():
142 if obj
.key
.startswith(USER_CONF_PREFIX
):
147 def nfs_rados_configs(rados
: 'Rados', nfs_pool
: str = POOL_NAME
) -> Set
[str]:
148 """Return a set of all the namespaces in the nfs_pool where nfs
149 configuration objects are found. The namespaces also correspond
153 prefixes
= (EXPORT_PREFIX
, CONF_PREFIX
, USER_CONF_PREFIX
)
154 with rados
.open_ioctx(nfs_pool
) as ioctx
:
155 ioctx
.set_namespace(LIBRADOS_ALL_NSPACES
)
156 for obj
in ioctx
.list_objects():
157 if obj
.key
.startswith(prefixes
):
162 class AppliedExportResults
:
163 """Gathers the results of multiple changed exports.
164 Returned by apply_export.
167 def __init__(self
) -> None:
168 self
.changes
: List
[Dict
[str, str]] = []
169 self
.has_error
= False
171 def append(self
, value
: Dict
[str, str]) -> None:
172 if value
.get("state", "") == "error":
173 self
.has_error
= True
174 self
.changes
.append(value
)
176 def to_simplified(self
) -> List
[Dict
[str, str]]:
179 def mgr_return_value(self
) -> int:
180 return -errno
.EIO
if self
.has_error
else 0
187 export_ls
: Optional
[Dict
[str, List
[Export
]]] = None
190 self
.rados_pool
= POOL_NAME
191 self
._exports
: Optional
[Dict
[str, List
[Export
]]] = export_ls
194 def exports(self
) -> Dict
[str, List
[Export
]]:
195 if self
._exports
is None:
197 log
.info("Begin export parsing")
198 for cluster_id
in known_cluster_ids(self
.mgr
):
199 self
.export_conf_objs
= [] # type: List[Export]
200 self
._read
_raw
_config
(cluster_id
)
201 self
._exports
[cluster_id
] = self
.export_conf_objs
202 log
.info("Exports parsed successfully %s", self
.exports
.items())
209 ) -> Optional
[Export
]:
211 for ex
in self
.exports
[cluster_id
]:
212 if ex
.pseudo
== pseudo_path
:
216 log
.info('no exports for cluster %s', cluster_id
)
219 def _fetch_export_id(
223 ) -> Optional
[Export
]:
225 for ex
in self
.exports
[cluster_id
]:
226 if ex
.export_id
== export_id
:
230 log
.info(f
'no exports for cluster {cluster_id}')
233 def _delete_export_user(self
, export
: Export
) -> None:
234 if isinstance(export
.fsal
, CephFSFSAL
):
235 assert export
.fsal
.user_id
236 self
.mgr
.check_mon_command({
238 'entity': 'client.{}'.format(export
.fsal
.user_id
),
240 log
.info("Deleted export user %s", export
.fsal
.user_id
)
241 elif isinstance(export
.fsal
, RGWFSAL
):
242 # do nothing; we're using the bucket owner creds.
245 def _create_export_user(self
, export
: Export
) -> None:
246 if isinstance(export
.fsal
, CephFSFSAL
):
247 fsal
= cast(CephFSFSAL
, export
.fsal
)
249 fsal
.user_id
= f
"nfs.{export.cluster_id}.{export.export_id}"
250 fsal
.cephx_key
= self
._create
_user
_key
(
251 export
.cluster_id
, fsal
.user_id
, export
.path
, fsal
.fs_name
253 log
.debug("Successfully created user %s for cephfs path %s", fsal
.user_id
, export
.path
)
255 elif isinstance(export
.fsal
, RGWFSAL
):
256 rgwfsal
= cast(RGWFSAL
, export
.fsal
)
257 if not rgwfsal
.user_id
:
259 ret
, out
, err
= self
.mgr
.tool_exec(
260 ['radosgw-admin', 'bucket', 'stats', '--bucket', export
.path
]
263 raise NFSException(f
'Failed to fetch owner for bucket {export.path}')
265 owner
= j
.get('owner', '')
266 rgwfsal
.user_id
= owner
267 assert rgwfsal
.user_id
268 ret
, out
, err
= self
.mgr
.tool_exec([
269 'radosgw-admin', 'user', 'info', '--uid', rgwfsal
.user_id
273 f
'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
277 # FIXME: make this more tolerate of unexpected output?
278 rgwfsal
.access_key_id
= j
['keys'][0]['access_key']
279 rgwfsal
.secret_access_key
= j
['keys'][0]['secret_key']
280 log
.debug("Successfully fetched user %s for RGW path %s", rgwfsal
.user_id
, export
.path
)
282 def _gen_export_id(self
, cluster_id
: str) -> int:
283 exports
= sorted([ex
.export_id
for ex
in self
.exports
[cluster_id
]])
292 def _read_raw_config(self
, rados_namespace
: str) -> None:
293 with self
.mgr
.rados
.open_ioctx(self
.rados_pool
) as ioctx
:
294 ioctx
.set_namespace(rados_namespace
)
295 for obj
in ioctx
.list_objects():
296 if obj
.key
.startswith(EXPORT_PREFIX
):
298 raw_config
= obj
.read(size
)
299 raw_config
= raw_config
.decode("utf-8")
300 log
.debug("read export configuration from rados "
301 "object %s/%s/%s", self
.rados_pool
,
302 rados_namespace
, obj
.key
)
303 self
.export_conf_objs
.append(Export
.from_export_block(
304 GaneshaConfParser(raw_config
).parse()[0], rados_namespace
))
306 def _save_export(self
, cluster_id
: str, export
: Export
) -> None:
307 self
.exports
[cluster_id
].append(export
)
308 self
._rados
(cluster_id
).write_obj(
309 format_block(export
.to_export_block()),
310 export_obj_name(export
.export_id
),
311 conf_obj_name(export
.cluster_id
)
317 pseudo_path
: Optional
[str],
318 export_obj
: Optional
[Export
] = None
322 export
: Optional
[Export
] = export_obj
325 export
= self
._fetch
_export
(cluster_id
, pseudo_path
)
329 self
._rados
(cluster_id
).remove_obj(
330 export_obj_name(export
.export_id
), conf_obj_name(cluster_id
))
331 self
.exports
[cluster_id
].remove(export
)
332 self
._delete
_export
_user
(export
)
333 if not self
.exports
[cluster_id
]:
334 del self
.exports
[cluster_id
]
335 log
.debug("Deleted all exports for cluster %s", cluster_id
)
337 raise NonFatalError("Export does not exist")
338 except Exception as e
:
339 log
.exception(f
"Failed to delete {pseudo_path} export for {cluster_id}")
340 raise ErrorResponse
.wrap(e
)
342 def _fetch_export_obj(self
, cluster_id
: str, ex_id
: int) -> Optional
[Export
]:
344 with self
.mgr
.rados
.open_ioctx(self
.rados_pool
) as ioctx
:
345 ioctx
.set_namespace(cluster_id
)
346 export
= Export
.from_export_block(
348 ioctx
.read(export_obj_name(ex_id
)).decode("utf-8")
353 except ObjectNotFound
:
354 log
.exception("Export ID: %s not found", ex_id
)
357 def _update_export(self
, cluster_id
: str, export
: Export
,
358 need_nfs_service_restart
: bool) -> None:
359 self
.exports
[cluster_id
].append(export
)
360 self
._rados
(cluster_id
).update_obj(
361 format_block(export
.to_export_block()),
362 export_obj_name(export
.export_id
), conf_obj_name(export
.cluster_id
),
363 should_notify
=not need_nfs_service_restart
)
364 if need_nfs_service_restart
:
365 restart_nfs_service(self
.mgr
, export
.cluster_id
)
367 def _validate_cluster_id(self
, cluster_id
: str) -> None:
368 """Raise an exception if cluster_id is not valid."""
369 clusters
= known_cluster_ids(self
.mgr
)
370 log
.debug("checking for %r in known nfs clusters: %r",
371 cluster_id
, clusters
)
372 if cluster_id
not in clusters
:
373 raise ErrorResponse(f
"Cluster {cluster_id!r} does not exist",
374 return_value
=-errno
.ENOENT
)
376 def create_export(self
, addr
: Optional
[List
[str]] = None, **kwargs
: Any
) -> Dict
[str, Any
]:
377 self
._validate
_cluster
_id
(kwargs
['cluster_id'])
378 # if addr(s) are provided, construct client list and adjust outer block
383 'access_type': 'ro' if kwargs
['read_only'] else 'rw',
384 'squash': kwargs
['squash'],
386 kwargs
['squash'] = 'none'
387 kwargs
['clients'] = clients
390 kwargs
['access_type'] = "none"
391 elif kwargs
['read_only']:
392 kwargs
['access_type'] = "RO"
394 kwargs
['access_type'] = "RW"
396 if kwargs
['cluster_id'] not in self
.exports
:
397 self
.exports
[kwargs
['cluster_id']] = []
400 fsal_type
= kwargs
.pop('fsal_type')
401 if fsal_type
== 'cephfs':
402 return self
.create_cephfs_export(**kwargs
)
403 if fsal_type
== 'rgw':
404 return self
.create_rgw_export(**kwargs
)
405 raise NotImplementedError()
406 except Exception as e
:
408 f
"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")
409 raise ErrorResponse
.wrap(e
)
411 def delete_export(self
,
413 pseudo_path
: str) -> None:
414 self
._validate
_cluster
_id
(cluster_id
)
415 return self
._delete
_export
(cluster_id
, pseudo_path
)
417 def delete_all_exports(self
, cluster_id
: str) -> None:
419 export_list
= list(self
.exports
[cluster_id
])
421 log
.info("No exports to delete")
423 for export
in export_list
:
425 self
._delete
_export
(cluster_id
=cluster_id
, pseudo_path
=None,
427 except Exception as e
:
428 raise NFSException(f
"Failed to delete export {export.export_id}: {e}")
429 log
.info("All exports successfully deleted for cluster id: %s", cluster_id
)
431 def list_all_exports(self
) -> List
[Dict
[str, Any
]]:
433 for cluster_id
, ls
in self
.exports
.items():
434 r
.extend([e
.to_dict() for e
in ls
])
437 def list_exports(self
,
439 detailed
: bool = False) -> List
[Any
]:
440 self
._validate
_cluster
_id
(cluster_id
)
443 result_d
= [export
.to_dict() for export
in self
.exports
[cluster_id
]]
446 result_ps
= [export
.pseudo
for export
in self
.exports
[cluster_id
]]
450 log
.warning("No exports to list for %s", cluster_id
)
452 except Exception as e
:
453 log
.exception(f
"Failed to list exports for {cluster_id}")
454 raise ErrorResponse
.wrap(e
)
456 def _get_export_dict(self
, cluster_id
: str, pseudo_path
: str) -> Optional
[Dict
[str, Any
]]:
457 export
= self
._fetch
_export
(cluster_id
, pseudo_path
)
459 return export
.to_dict()
460 log
.warning(f
"No {pseudo_path} export to show for {cluster_id}")
468 self
._validate
_cluster
_id
(cluster_id
)
470 export_dict
= self
._get
_export
_dict
(cluster_id
, pseudo_path
)
471 log
.info(f
"Fetched {export_dict!r} for {cluster_id!r}, {pseudo_path!r}")
472 return export_dict
if export_dict
else {}
473 except Exception as e
:
474 log
.exception(f
"Failed to get {pseudo_path} export for {cluster_id}")
475 raise ErrorResponse
.wrap(e
)
477 def get_export_by_id(
481 ) -> Optional
[Dict
[str, Any
]]:
482 export
= self
._fetch
_export
_id
(cluster_id
, export_id
)
483 return export
.to_dict() if export
else None
485 def get_export_by_pseudo(
489 ) -> Optional
[Dict
[str, Any
]]:
490 export
= self
._fetch
_export
(cluster_id
, pseudo_path
)
491 return export
.to_dict() if export
else None
493 # This method is used by the dashboard module (../dashboard/controllers/nfs.py)
494 # Do not change interface without updating the Dashboard code
495 def apply_export(self
, cluster_id
: str, export_config
: str) -> AppliedExportResults
:
497 exports
= self
._read
_export
_config
(cluster_id
, export_config
)
498 except Exception as e
:
499 log
.exception(f
'Failed to update export: {e}')
500 raise ErrorResponse
.wrap(e
)
502 aeresults
= AppliedExportResults()
503 for export
in exports
:
504 aeresults
.append(self
._change
_export
(cluster_id
, export
))
507 def _read_export_config(self
, cluster_id
: str, export_config
: str) -> List
[Dict
]:
508 if not export_config
:
509 raise NFSInvalidOperation("Empty Config!!")
511 j
= json
.loads(export_config
)
513 # okay, not JSON. is it an EXPORT block?
515 blocks
= GaneshaConfParser(export_config
).parse()
517 Export
.from_export_block(block
, cluster_id
)
520 j
= [export
.to_dict() for export
in exports
]
521 except Exception as ex
:
522 raise NFSInvalidOperation(f
"Input must be JSON or a ganesha EXPORT block: {ex}")
523 # check export type - always return a list
524 if isinstance(j
, list):
525 return j
# j is already a list object
526 return [j
] # return a single object list, with j as the only item
528 def _change_export(self
, cluster_id
: str, export
: Dict
) -> Dict
[str, str]:
530 return self
._apply
_export
(cluster_id
, export
)
531 except NotImplementedError:
532 # in theory, the NotImplementedError here may be raised by a hook back to
533 # an orchestration module. If the orchestration module supports it the NFS
534 # servers may be restarted. If not supported the expectation is that an
535 # (unfortunately generic) NotImplementedError will be raised. We then
536 # indicate to the user that manual intervention may be needed now that the
537 # configuration changes have been applied.
539 "pseudo": export
['pseudo'],
541 "msg": "changes applied (Manual restart of NFS Pods required)",
543 except Exception as ex
:
544 msg
= f
'Failed to apply export: {ex}'
546 return {"state": "error", "msg": msg
}
555 osd_cap
= 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
556 self
.rados_pool
, cluster_id
, fs_name
)
557 # NFS-Ganesha can dynamically enforce an export's access type changes, but Ceph server
558 # daemons can't dynamically enforce changes in Ceph user caps of the Ceph clients. To
559 # allow dynamic updates of CephFS NFS exports, always set FSAL Ceph user's MDS caps with
560 # path restricted read-write access. Rely on the ganesha servers to enforce the export
561 # access type requested for the NFS clients.
562 self
.mgr
.check_mon_command({
563 'prefix': 'auth caps',
564 'entity': f
'client.{user_id}',
565 'caps': ['mon', 'allow r', 'osd', osd_cap
, 'mds', 'allow rw path={}'.format(path
)],
568 log
.info("Export user updated %s", user_id
)
570 def _create_user_key(
577 osd_cap
= 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
578 self
.rados_pool
, cluster_id
, fs_name
)
582 'mds', 'allow rw path={}'.format(path
)
585 ret
, out
, err
= self
.mgr
.mon_command({
586 'prefix': 'auth get-or-create',
587 'entity': 'client.{}'.format(entity
),
591 if ret
== -errno
.EINVAL
and 'does not match' in err
:
592 ret
, out
, err
= self
.mgr
.mon_command({
593 'prefix': 'auth caps',
594 'entity': 'client.{}'.format(entity
),
599 raise NFSException(f
'Failed to update caps for {entity}: {err}')
600 ret
, out
, err
= self
.mgr
.mon_command({
601 'prefix': 'auth get',
602 'entity': 'client.{}'.format(entity
),
606 raise NFSException(f
'Failed to fetch caps for {entity}: {err}')
608 json_res
= json
.loads(out
)
609 log
.info("Export user created is %s", json_res
[0]['entity'])
610 return json_res
[0]['key']
612 def create_export_from_dict(self
,
615 ex_dict
: Dict
[str, Any
]) -> Export
:
616 pseudo_path
= ex_dict
.get("pseudo")
618 raise NFSInvalidOperation("export must specify pseudo path")
620 path
= ex_dict
.get("path")
622 raise NFSInvalidOperation("export must specify path")
623 path
= normalize_path(path
)
625 fsal
= ex_dict
.get("fsal", {})
626 fsal_type
= fsal
.get("name")
627 if fsal_type
== NFS_GANESHA_SUPPORTED_FSALS
[1]:
628 if '/' in path
and path
!= '/':
629 raise NFSInvalidOperation('"/" is not allowed in path with bucket name')
630 elif fsal_type
== NFS_GANESHA_SUPPORTED_FSALS
[0]:
631 fs_name
= fsal
.get("fs_name")
633 raise NFSInvalidOperation("export FSAL must specify fs_name")
634 if not check_fs(self
.mgr
, fs_name
):
635 raise FSNotFound(fs_name
)
637 user_id
= f
"nfs.{cluster_id}.{ex_id}"
638 if "user_id" in fsal
and fsal
["user_id"] != user_id
:
639 raise NFSInvalidOperation(f
"export FSAL user_id must be '{user_id}'")
641 raise NFSInvalidOperation(f
"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}."
642 "Export must specify any one of it.")
644 ex_dict
["fsal"] = fsal
645 ex_dict
["cluster_id"] = cluster_id
646 export
= Export
.from_dict(ex_id
, ex_dict
)
647 export
.validate(self
.mgr
)
648 log
.debug("Successfully created %s export-%s from dict for cluster %s",
649 fsal_type
, ex_id
, cluster_id
)
652 def create_cephfs_export(self
,
661 sectype
: Optional
[List
[str]] = None) -> Dict
[str, Any
]:
664 cephfs_path_is_dir(self
.mgr
, fs_name
, path
)
665 except NotADirectoryError
:
666 raise NFSException(f
"path {path} is not a dir", -errno
.ENOTDIR
)
667 except cephfs
.ObjectNotFound
:
668 raise NFSObjectNotFound(f
"path {path} does not exist")
669 except cephfs
.Error
as e
:
670 raise NFSException(e
.args
[1], -e
.args
[0])
672 pseudo_path
= normalize_path(pseudo_path
)
674 if not self
._fetch
_export
(cluster_id
, pseudo_path
):
675 export
= self
.create_export_from_dict(
677 self
._gen
_export
_id
(cluster_id
),
679 "pseudo": pseudo_path
,
681 "access_type": access_type
,
684 "name": NFS_GANESHA_SUPPORTED_FSALS
[0],
691 log
.debug("creating cephfs export %s", export
)
692 self
._create
_export
_user
(export
)
693 self
._save
_export
(cluster_id
, export
)
695 "bind": export
.pseudo
,
698 "cluster": cluster_id
,
699 "mode": export
.access_type
,
702 raise NonFatalError("Export already exists")
704 def create_rgw_export(self
,
710 bucket
: Optional
[str] = None,
711 user_id
: Optional
[str] = None,
713 sectype
: Optional
[List
[str]] = None) -> Dict
[str, Any
]:
714 pseudo_path
= normalize_path(pseudo_path
)
716 if not bucket
and not user_id
:
717 raise ErrorResponse("Must specify either bucket or user_id")
719 if not self
._fetch
_export
(cluster_id
, pseudo_path
):
720 export
= self
.create_export_from_dict(
722 self
._gen
_export
_id
(cluster_id
),
724 "pseudo": pseudo_path
,
725 "path": bucket
or '/',
726 "access_type": access_type
,
729 "name": NFS_GANESHA_SUPPORTED_FSALS
[1],
736 log
.debug("creating rgw export %s", export
)
737 self
._create
_export
_user
(export
)
738 self
._save
_export
(cluster_id
, export
)
740 "bind": export
.pseudo
,
742 "cluster": cluster_id
,
743 "mode": export
.access_type
,
744 "squash": export
.squash
,
747 raise NonFatalError("Export already exists")
752 new_export_dict
: Dict
,
754 for k
in ['path', 'pseudo']:
755 if k
not in new_export_dict
:
756 raise NFSInvalidOperation(f
'Export missing required field {k}')
757 if cluster_id
not in self
.exports
:
758 self
.exports
[cluster_id
] = []
760 new_export_dict
['path'] = normalize_path(new_export_dict
['path'])
761 new_export_dict
['pseudo'] = normalize_path(new_export_dict
['pseudo'])
763 old_export
= self
._fetch
_export
(cluster_id
, new_export_dict
['pseudo'])
765 # Check if export id matches
766 if new_export_dict
.get('export_id'):
767 if old_export
.export_id
!= new_export_dict
.get('export_id'):
768 raise NFSInvalidOperation('Export ID changed, Cannot update export')
770 new_export_dict
['export_id'] = old_export
.export_id
771 elif new_export_dict
.get('export_id'):
772 old_export
= self
._fetch
_export
_obj
(cluster_id
, new_export_dict
['export_id'])
774 # re-fetch via old pseudo
775 old_export
= self
._fetch
_export
(cluster_id
, old_export
.pseudo
)
777 log
.debug("export %s pseudo %s -> %s",
778 old_export
.export_id
, old_export
.pseudo
, new_export_dict
['pseudo'])
780 new_export
= self
.create_export_from_dict(
782 new_export_dict
.get('export_id', self
._gen
_export
_id
(cluster_id
)),
787 self
._create
_export
_user
(new_export
)
788 self
._save
_export
(cluster_id
, new_export
)
789 return {"pseudo": new_export
.pseudo
, "state": "added"}
791 need_nfs_service_restart
= True
792 if old_export
.fsal
.name
!= new_export
.fsal
.name
:
793 raise NFSInvalidOperation('FSAL change not allowed')
794 if old_export
.pseudo
!= new_export
.pseudo
:
795 log
.debug('export %s pseudo %s -> %s',
796 new_export
.export_id
, old_export
.pseudo
, new_export
.pseudo
)
798 if old_export
.fsal
.name
== NFS_GANESHA_SUPPORTED_FSALS
[0]:
799 old_fsal
= cast(CephFSFSAL
, old_export
.fsal
)
800 new_fsal
= cast(CephFSFSAL
, new_export
.fsal
)
801 if old_fsal
.user_id
!= new_fsal
.user_id
:
802 self
._delete
_export
_user
(old_export
)
803 self
._create
_export
_user
(new_export
)
805 old_export
.path
!= new_export
.path
806 or old_fsal
.fs_name
!= new_fsal
.fs_name
808 self
._update
_user
_id
(
811 cast(str, new_fsal
.fs_name
),
812 cast(str, new_fsal
.user_id
)
814 new_fsal
.cephx_key
= old_fsal
.cephx_key
816 expected_mds_caps
= 'allow rw path={}'.format(new_export
.path
)
817 entity
= new_fsal
.user_id
818 ret
, out
, err
= self
.mgr
.mon_command({
819 'prefix': 'auth get',
820 'entity': 'client.{}'.format(entity
),
824 raise NFSException(f
'Failed to fetch caps for {entity}: {err}')
825 actual_mds_caps
= json
.loads(out
)[0]['caps'].get('mds')
826 if actual_mds_caps
!= expected_mds_caps
:
827 self
._update
_user
_id
(
830 cast(str, new_fsal
.fs_name
),
831 cast(str, new_fsal
.user_id
)
833 elif old_export
.pseudo
== new_export
.pseudo
:
834 need_nfs_service_restart
= False
835 new_fsal
.cephx_key
= old_fsal
.cephx_key
837 if old_export
.fsal
.name
== NFS_GANESHA_SUPPORTED_FSALS
[1]:
838 old_rgw_fsal
= cast(RGWFSAL
, old_export
.fsal
)
839 new_rgw_fsal
= cast(RGWFSAL
, new_export
.fsal
)
840 if old_rgw_fsal
.user_id
!= new_rgw_fsal
.user_id
:
841 self
._delete
_export
_user
(old_export
)
842 self
._create
_export
_user
(new_export
)
843 elif old_rgw_fsal
.access_key_id
!= new_rgw_fsal
.access_key_id
:
844 raise NFSInvalidOperation('access_key_id change is not allowed')
845 elif old_rgw_fsal
.secret_access_key
!= new_rgw_fsal
.secret_access_key
:
846 raise NFSInvalidOperation('secret_access_key change is not allowed')
848 self
.exports
[cluster_id
].remove(old_export
)
850 self
._update
_export
(cluster_id
, new_export
, need_nfs_service_restart
)
852 return {"pseudo": new_export
.pseudo
, "state": "updated"}
854 def _rados(self
, cluster_id
: str) -> NFSRados
:
855 """Return a new NFSRados object for the given cluster id."""
856 return NFSRados(self
.mgr
.rados
, cluster_id
)