# ceph/src/pybind/mgr/nfs/export.py (ceph 16.2.7)
import errno
import json
import logging
from typing import List, Any, Dict, Tuple, Optional, TYPE_CHECKING, TypeVar, Callable, cast
from os.path import normpath

from rados import TimedOut, ObjectNotFound

from mgr_module import NFS_POOL_NAME as POOL_NAME, NFS_GANESHA_SUPPORTED_FSALS

from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL
from .exception import NFSException, NFSInvalidOperation, FSNotFound, \
    ClusterNotFound
from .utils import available_clusters, check_fs, restart_nfs_service

if TYPE_CHECKING:
    from nfs.module import Module

FuncT = TypeVar('FuncT', bound=Callable)

log = logging.getLogger(__name__)


def export_cluster_checker(func: FuncT) -> FuncT:
    def cluster_check(
            export: 'ExportMgr',
            *args: Any,
            **kwargs: Any
    ) -> Tuple[int, str, str]:
        """
        Check that the cluster exists before invoking the wrapped command.
        """
        if kwargs['cluster_id'] not in available_clusters(export.mgr):
            return -errno.ENOENT, "", "Cluster does not exist"
        return func(export, *args, **kwargs)
    return cast(FuncT, cluster_check)


def exception_handler(
        exception_obj: Exception,
        log_msg: str = ""
) -> Tuple[int, str, str]:
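    """
    Convert an exception into an mgr-style (retval, stdout, stderr) tuple,
    using the exception's ``errno`` attribute as the return code when
    present, else -1.
    """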
    if log_msg:
        log.exception(log_msg)
    return getattr(exception_obj, 'errno', -1), "", str(exception_obj)


class NFSRados:
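    """Helper for reading and writing ganesha config objects stored in the
    NFS RADOS pool under a per-cluster namespace."""
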
    def __init__(self, mgr: 'Module', namespace: str) -> None:
        self.mgr = mgr
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj: str) -> str:
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name: str) -> RawBlock:
        return RawBlock('%url', values={'value': self._make_rados_url(obj_name)})

    def write_obj(self, conf_block: str, obj: str, config_obj: str = '') -> None:
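        """Write ``conf_block`` to RADOS object ``obj``. When ``config_obj``
        is given, also append a %url reference for ``obj`` to it and notify
        its watchers so running ganesha instances reload the config."""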
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                self._create_url_block(obj)).encode('utf-8'))
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Added %s url to %s", obj, config_obj)

    def read_obj(self, obj: str) -> Optional[str]:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            try:
                return ioctx.read(obj, 1048576).decode()
            except ObjectNotFound:
                return None

    def update_obj(self, conf_block: str, obj: str, config_obj: str) -> None:
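        """Overwrite an existing export object and notify watchers of the
        common config object so the change is picked up."""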
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            log.debug("write configuration into rados object %s/%s/%s",
                      self.pool, self.namespace, obj)
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Update export %s in %s", obj, config_obj)

    def remove_obj(self, obj: str, config_obj: str) -> None:
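        """Remove an export object and strip its %url reference from the
        common config object, then notify watchers."""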
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            ExportMgr._check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: %s", url)

    def remove_all_obj(self) -> None:
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self) -> bool:
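        """Return True if a user-defined config object (key prefixed with
        "userconf-nfs") exists in this namespace."""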
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("userconf-nfs"):
                    return True
        return False


class ExportMgr:
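    """Manage NFS ganesha exports across clusters: create, delete, list,
    fetch and update them, backed by per-cluster RADOS config objects."""
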
    def __init__(
            self,
            mgr: 'Module',
            export_ls: Optional[Dict[str, List[Export]]] = None
    ) -> None:
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self._exports: Optional[Dict[str, List[Export]]] = export_ls

    @staticmethod
    def _check_rados_notify(ioctx: Any, obj: str) -> None:
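        """Notify watchers of ``obj`` (the ganesha daemons) so they reload
        their config; a notify timeout is logged but not treated as fatal."""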
        try:
            ioctx.notify(obj)
        except TimedOut:
            log.exception("Ganesha timed out")

    @property
    def exports(self) -> Dict[str, List[Export]]:
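        """All exports, keyed by cluster_id; parsed lazily from RADOS on
        first access and cached."""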
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in available_clusters(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self.exports[cluster_id] = self.export_conf_objs
                log.info("Exports parsed successfully %s", self.exports.items())
        return self._exports

    def _fetch_export(
            self,
            cluster_id: str,
            pseudo_path: str
    ) -> Optional[Export]:
        try:
            for ex in self.exports[cluster_id]:
                if ex.pseudo == pseudo_path:
                    return ex
            return None
        except KeyError:
            log.info('no exports for cluster %s', cluster_id)
            return None

    def _fetch_export_id(
            self,
            cluster_id: str,
            export_id: int
    ) -> Optional[Export]:
        try:
            for ex in self.exports[cluster_id]:
                if ex.export_id == export_id:
                    return ex
            return None
        except KeyError:
            log.info('no exports for cluster %s', cluster_id)
            return None

    def _delete_export_user(self, export: Export) -> None:
        if isinstance(export.fsal, CephFSFSAL):
            assert export.fsal.user_id
            self.mgr.check_mon_command({
                'prefix': 'auth rm',
                'entity': 'client.{}'.format(export.fsal.user_id),
            })
            log.info("Deleted export user %s", export.fsal.user_id)
        elif isinstance(export.fsal, RGWFSAL):
            # do nothing; we're using the bucket owner creds.
            pass

    def _create_export_user(self, export: Export) -> None:
        if isinstance(export.fsal, CephFSFSAL):
            fsal = cast(CephFSFSAL, export.fsal)
            assert fsal.fs_name

            # is top-level or any client rw?
            rw = export.access_type.lower() == 'rw'
            for c in export.clients:
                if c.access_type.lower() == 'rw':
                    rw = True
                    break

            fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
            fsal.cephx_key = self._create_user_key(
                export.cluster_id, fsal.user_id, export.path, fsal.fs_name, not rw
            )
            log.debug("Successfully created user %s for cephfs path %s", fsal.user_id, export.path)

        elif isinstance(export.fsal, RGWFSAL):
            rgwfsal = cast(RGWFSAL, export.fsal)
            if not rgwfsal.user_id:
                # no user_id given; look up the bucket owner and use them
                assert export.path
                ret, out, err = self.mgr.tool_exec(
                    ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
                )
                if ret:
                    raise NFSException(f'Failed to fetch owner for bucket {export.path}')
                j = json.loads(out)
                owner = j.get('owner', '')
                rgwfsal.user_id = owner
            assert rgwfsal.user_id
            ret, out, err = self.mgr.tool_exec([
                'radosgw-admin', 'user', 'info', '--uid', rgwfsal.user_id
            ])
            if ret:
                raise NFSException(
                    f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
                )
            j = json.loads(out)

            # FIXME: make this more tolerant of unexpected output?
            rgwfsal.access_key_id = j['keys'][0]['access_key']
            rgwfsal.secret_access_key = j['keys'][0]['secret_key']
            log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, export.path)

    def _gen_export_id(self, cluster_id: str) -> int:
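        """Return the lowest unused export ID for the cluster, starting at 1."""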
        exports = sorted([ex.export_id for ex in self.exports[cluster_id]])
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid

    def _read_raw_config(self, rados_namespace: str) -> None:
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("export-"):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s", self.rados_pool,
                              rados_namespace, obj.key)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0], rados_namespace))

    def _save_export(self, cluster_id: str, export: Export) -> None:
        self.exports[cluster_id].append(export)
        NFSRados(self.mgr, cluster_id).write_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            f'export-{export.export_id}',
            f'conf-nfs.{export.cluster_id}'
        )

    def _delete_export(
            self,
            cluster_id: str,
            pseudo_path: Optional[str],
            export_obj: Optional[Export] = None
    ) -> Tuple[int, str, str]:
        try:
            if export_obj:
                export: Optional[Export] = export_obj
            else:
                assert pseudo_path
                export = self._fetch_export(cluster_id, pseudo_path)

            if export:
                if pseudo_path:
                    NFSRados(self.mgr, cluster_id).remove_obj(
                        f'export-{export.export_id}', f'conf-nfs.{cluster_id}')
                self.exports[cluster_id].remove(export)
                self._delete_export_user(export)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                    log.debug("Deleted all exports for cluster %s", cluster_id)
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            return exception_handler(e, f"Failed to delete {pseudo_path} export for {cluster_id}")

    def _fetch_export_obj(self, cluster_id: str, ex_id: int) -> Optional[Export]:
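        """Read a single export directly from its RADOS object, bypassing
        the in-memory cache."""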
        try:
            with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
                ioctx.set_namespace(cluster_id)
                export = Export.from_export_block(
                    GaneshaConfParser(
                        ioctx.read(f"export-{ex_id}").decode("utf-8")
                    ).parse()[0],
                    cluster_id
                )
                return export
        except ObjectNotFound:
            log.exception("Export ID: %s not found", ex_id)
        return None

    def _update_export(self, cluster_id: str, export: Export) -> None:
        self.exports[cluster_id].append(export)
        NFSRados(self.mgr, cluster_id).update_obj(
            GaneshaConfParser.write_block(export.to_export_block()),
            f'export-{export.export_id}', f'conf-nfs.{export.cluster_id}')

    def format_path(self, path: str) -> str:
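        """Normalize a path: strip surrounding whitespace, collapse
        redundant separators, and reduce the leading '//' that normpath
        can preserve to a single '/'."""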
        if path:
            path = normpath(path.strip())
            if path[:2] == "//":
                path = path[1:]
        return path

    @export_cluster_checker
    def create_export(self, addr: Optional[List[str]] = None, **kwargs: Any) -> Tuple[int, str, str]:
        # if addr(s) are provided, construct client list and adjust outer block
        clients = []
        if addr:
            clients = [{
                'addresses': addr,
                'access_type': 'ro' if kwargs['read_only'] else 'rw',
                'squash': kwargs['squash'],
            }]
            kwargs['squash'] = 'none'
        kwargs['clients'] = clients

        if clients:
            kwargs['access_type'] = "none"
        elif kwargs['read_only']:
            kwargs['access_type'] = "RO"
        else:
            kwargs['access_type'] = "RW"

        if kwargs['cluster_id'] not in self.exports:
            self.exports[kwargs['cluster_id']] = []

        try:
            fsal_type = kwargs.pop('fsal_type')
            if fsal_type == 'cephfs':
                return self.create_cephfs_export(**kwargs)
            if fsal_type == 'rgw':
                return self.create_rgw_export(**kwargs)
            raise NotImplementedError()
        except Exception as e:
            return exception_handler(e, f"Failed to create {kwargs['pseudo_path']} export for {kwargs['cluster_id']}")

    @export_cluster_checker
    def delete_export(self,
                      cluster_id: str,
                      pseudo_path: str) -> Tuple[int, str, str]:
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id: str) -> None:
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id, pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise NFSException(f"Failed to delete exports: {err} and {ret}")
        log.info("All exports successfully deleted for cluster id: %s", cluster_id)

    def list_all_exports(self) -> List[Dict[str, Any]]:
        r = []
        for cluster_id, ls in self.exports.items():
            r.extend([e.to_dict() for e in ls])
        return r

    @export_cluster_checker
    def list_exports(self,
                     cluster_id: str,
                     detailed: bool = False) -> Tuple[int, str, str]:
        try:
            if detailed:
                result_d = [export.to_dict() for export in self.exports[cluster_id]]
                return 0, json.dumps(result_d, indent=2), ''
            else:
                result_ps = [export.pseudo for export in self.exports[cluster_id]]
                return 0, json.dumps(result_ps, indent=2), ''

        except KeyError:
            log.warning("No exports to list for %s", cluster_id)
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to list exports for {cluster_id}")

    def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        if export:
            return export.to_dict()
        log.warning("No %s export to show for %s", pseudo_path, cluster_id)
        return None

    @export_cluster_checker
    def get_export(
            self,
            cluster_id: str,
            pseudo_path: str,
    ) -> Tuple[int, str, str]:
        try:
            export_dict = self._get_export_dict(cluster_id, pseudo_path)
            if export_dict:
                return 0, json.dumps(export_dict, indent=2), ''
            log.warning("No %s export to show for %s", pseudo_path, cluster_id)
            return 0, '', ''
        except Exception as e:
            return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")

    def get_export_by_id(
            self,
            cluster_id: str,
            export_id: int
    ) -> Optional[Dict[str, Any]]:
        export = self._fetch_export_id(cluster_id, export_id)
        return export.to_dict() if export else None

    def get_export_by_pseudo(
            self,
            cluster_id: str,
            pseudo_path: str
    ) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        return export.to_dict() if export else None

    def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
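        """Create or update export(s) from ``export_config``, which may be
        either a JSON export spec (a single export or a list) or one or
        more raw ganesha EXPORT blocks."""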
        try:
            if not export_config:
                raise NFSInvalidOperation("Empty Config!!")
            try:
                j = json.loads(export_config)
            except ValueError:
                # okay, not JSON. is it an EXPORT block?
                try:
                    blocks = GaneshaConfParser(export_config).parse()
                    exports = [
                        Export.from_export_block(block, cluster_id)
                        for block in blocks
                    ]
                    j = [export.to_dict() for export in exports]
                except Exception as ex:
                    raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")

            # check export type
            if isinstance(j, list):
                ret, out, err = (0, '', '')
                for export in j:
                    try:
                        r, o, e = self._apply_export(cluster_id, export)
                    except Exception as ex:
                        r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
                        if r:
                            ret = r
                    if o:
                        out += o + '\n'
                    if e:
                        err += e + '\n'
                return ret, out, err
            else:
                r, o, e = self._apply_export(cluster_id, j)
                return r, o, e
        except NotImplementedError:
            return 0, "Manual restart of NFS Pods required for successful update of exports", ""
        except Exception as e:
            return exception_handler(e, f'Failed to update export: {e}')

    def _update_user_id(
            self,
            cluster_id: str,
            path: str,
            access_type: str,
            fs_name: str,
            user_id: str
    ) -> None:
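        """Rewrite the cephx caps of an existing export user to match the
        current path, filesystem and access type."""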
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        access_type = 'r' if access_type == 'RO' else 'rw'

        self.mgr.check_mon_command({
            'prefix': 'auth caps',
            'entity': f'client.{user_id}',
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
                access_type, path)],
        })

        log.info("Export user updated %s", user_id)

    def _create_user_key(
            self,
            cluster_id: str,
            entity: str,
            path: str,
            fs_name: str,
            fs_ro: bool
    ) -> str:
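        """Create a cephx user for a CephFS export, updating its caps if it
        already exists with different caps, and return its secret key."""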
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
            self.rados_pool, cluster_id, fs_name)
        access_type = 'r' if fs_ro else 'rw'
        nfs_caps = [
            'mon', 'allow r',
            'osd', osd_cap,
            'mds', 'allow {} path={}'.format(access_type, path)
        ]

        ret, out, err = self.mgr.mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': nfs_caps,
            'format': 'json',
        })
        if ret == -errno.EINVAL and 'does not match' in err:
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth caps',
                'entity': 'client.{}'.format(entity),
                'caps': nfs_caps,
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to update caps for {entity}: {err}')
            ret, out, err = self.mgr.mon_command({
                'prefix': 'auth get',
                'entity': 'client.{}'.format(entity),
                'format': 'json',
            })
            if err:
                raise NFSException(f'Failed to fetch caps for {entity}: {err}')

        json_res = json.loads(out)
        log.info("Export user created is %s", json_res[0]['entity'])
        return json_res[0]['key']

    def create_export_from_dict(self,
                                cluster_id: str,
                                ex_id: int,
                                ex_dict: Dict[str, Any]) -> Export:
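        """Validate an export dict (pseudo path, path and FSAL) and build
        an Export object from it."""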
        pseudo_path = ex_dict.get("pseudo")
        if not pseudo_path:
            raise NFSInvalidOperation("export must specify pseudo path")

        path = ex_dict.get("path")
        if path is None:
            raise NFSInvalidOperation("export must specify path")
        path = self.format_path(path)

        fsal = ex_dict.get("fsal", {})
        fsal_type = fsal.get("name")
        if fsal_type == NFS_GANESHA_SUPPORTED_FSALS[1]:
            if '/' in path and path != '/':
                raise NFSInvalidOperation('"/" is not allowed in path with bucket name')
        elif fsal_type == NFS_GANESHA_SUPPORTED_FSALS[0]:
            fs_name = fsal.get("fs_name")
            if not fs_name:
                raise NFSInvalidOperation("export FSAL must specify fs_name")
            if not check_fs(self.mgr, fs_name):
                raise FSNotFound(fs_name)

            user_id = f"nfs.{cluster_id}.{ex_id}"
            if "user_id" in fsal and fsal["user_id"] != user_id:
                raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'")
        else:
            raise NFSInvalidOperation(f"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}. "
                                      "Export must specify one of them.")

        ex_dict["fsal"] = fsal
        ex_dict["cluster_id"] = cluster_id
        export = Export.from_dict(ex_id, ex_dict)
        export.validate(self.mgr)
        log.debug("Successfully created %s export-%s from dict for cluster %s",
                  fsal_type, ex_id, cluster_id)
        return export

    def create_cephfs_export(self,
                             fs_name: str,
                             cluster_id: str,
                             pseudo_path: str,
                             read_only: bool,
                             path: str,
                             squash: str,
                             access_type: str,
                             clients: list = []) -> Tuple[int, str, str]:
        pseudo_path = self.format_path(pseudo_path)

        if not self._fetch_export(cluster_id, pseudo_path):
            export = self.create_export_from_dict(
                cluster_id,
                self._gen_export_id(cluster_id),
                {
                    "pseudo": pseudo_path,
                    "path": path,
                    "access_type": access_type,
                    "squash": squash,
                    "fsal": {
                        "name": NFS_GANESHA_SUPPORTED_FSALS[0],
                        "fs_name": fs_name,
                    },
                    "clients": clients,
                }
            )
            log.debug("creating cephfs export %s", export)
            self._create_export_user(export)
            self._save_export(cluster_id, export)
            result = {
                "bind": export.pseudo,
                "fs": fs_name,
                "path": export.path,
                "cluster": cluster_id,
                "mode": export.access_type,
            }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"

    def create_rgw_export(self,
                          cluster_id: str,
                          pseudo_path: str,
                          access_type: str,
                          read_only: bool,
                          squash: str,
                          bucket: Optional[str] = None,
                          user_id: Optional[str] = None,
                          clients: list = []) -> Tuple[int, str, str]:
        pseudo_path = self.format_path(pseudo_path)

        if not bucket and not user_id:
            return -errno.EINVAL, "", "Must specify either bucket or user_id"

        if not self._fetch_export(cluster_id, pseudo_path):
            export = self.create_export_from_dict(
                cluster_id,
                self._gen_export_id(cluster_id),
                {
                    "pseudo": pseudo_path,
                    "path": bucket or '/',
                    "access_type": access_type,
                    "squash": squash,
                    "fsal": {
                        "name": NFS_GANESHA_SUPPORTED_FSALS[1],
                        "user_id": user_id,
                    },
                    "clients": clients,
                }
            )
            log.debug("creating rgw export %s", export)
            self._create_export_user(export)
            self._save_export(cluster_id, export)
            result = {
                "bind": export.pseudo,
                "path": export.path,
                "cluster": cluster_id,
                "mode": export.access_type,
                "squash": export.squash,
            }
            return (0, json.dumps(result, indent=4), '')
        return 0, "", "Export already exists"

    def _apply_export(
            self,
            cluster_id: str,
            new_export_dict: Dict,
    ) -> Tuple[int, str, str]:
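        """Create a new export or update an existing one, matching first by
        pseudo path and then by export_id; reconcile the export user as
        needed and restart the ganesha service to apply the change."""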
        for k in ['path', 'pseudo']:
            if k not in new_export_dict:
                raise NFSInvalidOperation(f'Export missing required field {k}')
        if cluster_id not in available_clusters(self.mgr):
            raise ClusterNotFound()
        if cluster_id not in self.exports:
            self.exports[cluster_id] = []

        new_export_dict['path'] = self.format_path(new_export_dict['path'])
        new_export_dict['pseudo'] = self.format_path(new_export_dict['pseudo'])

        old_export = self._fetch_export(cluster_id, new_export_dict['pseudo'])
        if old_export:
            # Check if export id matches
            if new_export_dict.get('export_id'):
                if old_export.export_id != new_export_dict.get('export_id'):
                    raise NFSInvalidOperation('Export ID changed, cannot update export')
            else:
                new_export_dict['export_id'] = old_export.export_id
        elif new_export_dict.get('export_id'):
            old_export = self._fetch_export_obj(cluster_id, new_export_dict['export_id'])
            if old_export:
                # re-fetch via old pseudo
                old_export = self._fetch_export(cluster_id, old_export.pseudo)
                assert old_export
                log.debug("export %s pseudo %s -> %s",
                          old_export.export_id, old_export.pseudo, new_export_dict['pseudo'])

        new_export = self.create_export_from_dict(
            cluster_id,
            new_export_dict.get('export_id', self._gen_export_id(cluster_id)),
            new_export_dict
        )

        if not old_export:
            self._create_export_user(new_export)
            self._save_export(cluster_id, new_export)
            return 0, f'Added export {new_export.pseudo}', ''

        if old_export.fsal.name != new_export.fsal.name:
            raise NFSInvalidOperation('FSAL change not allowed')
        if old_export.pseudo != new_export.pseudo:
            log.debug('export %s pseudo %s -> %s',
                      new_export.export_id, old_export.pseudo, new_export.pseudo)

        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
            old_fsal = cast(CephFSFSAL, old_export.fsal)
            new_fsal = cast(CephFSFSAL, new_export.fsal)
            if old_fsal.user_id != new_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif (
                    old_export.path != new_export.path
                    or old_fsal.fs_name != new_fsal.fs_name
            ):
                self._update_user_id(
                    cluster_id,
                    new_export.path,
                    new_export.access_type,
                    cast(str, new_fsal.fs_name),
                    cast(str, new_fsal.user_id)
                )
                new_fsal.cephx_key = old_fsal.cephx_key
            else:
                new_fsal.cephx_key = old_fsal.cephx_key
        if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
            old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
            new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
            if old_rgw_fsal.user_id != new_rgw_fsal.user_id:
                self._delete_export_user(old_export)
                self._create_export_user(new_export)
            elif old_rgw_fsal.access_key_id != new_rgw_fsal.access_key_id:
                raise NFSInvalidOperation('access_key_id change is not allowed')
            elif old_rgw_fsal.secret_access_key != new_rgw_fsal.secret_access_key:
                raise NFSInvalidOperation('secret_access_key change is not allowed')

        self.exports[cluster_id].remove(old_export)
        self._update_export(cluster_id, new_export)

        # TODO: detect whether the update is such that a reload is sufficient
        restart_nfs_service(self.mgr, new_export.cluster_id)

        return 0, f"Updated export {new_export.pseudo}", ""