# ceph mgr module: volumes/fs/nfs.py — NFS-Ganesha cluster and export management.
import errno
import json
import logging
import socket
from os.path import isabs, normpath
from typing import List

from rados import TimedOut, ObjectNotFound

from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec

import orchestrator

from .fs_util import create_pool
15 log
= logging
.getLogger(__name__
)
16 POOL_NAME
= 'nfs-ganesha'
19 def available_clusters(mgr
):
21 This method returns list of available cluster ids.
22 Service name is service_type.service_id
24 completion.result value:
25 <ServiceDescription of <NFSServiceSpec for service_name=nfs.vstart>>
26 return value: ['vstart']
28 # TODO check cephadm cluster list with rados pool conf objects
29 completion
= mgr
.describe_service(service_type
='nfs')
30 orchestrator
.raise_if_exception(completion
)
31 return [cluster
.spec
.service_id
for cluster
in completion
.result
32 if cluster
.spec
.service_id
]
35 def restart_nfs_service(mgr
, cluster_id
):
37 This methods restarts the nfs daemons
39 completion
= mgr
.service_action(action
='restart',
40 service_name
='nfs.'+cluster_id
)
41 orchestrator
.raise_if_exception(completion
)
44 def export_cluster_checker(func
):
45 def cluster_check(fs_export
, *args
, **kwargs
):
47 This method checks if cluster exists and sets rados namespace.
49 if kwargs
['cluster_id'] not in available_clusters(fs_export
.mgr
):
50 return -errno
.ENOENT
, "", "Cluster does not exists"
51 fs_export
.rados_namespace
= kwargs
['cluster_id']
52 return func(fs_export
, *args
, **kwargs
)
56 def cluster_setter(func
):
57 def set_pool_ns_clusterid(nfs
, *args
, **kwargs
):
58 nfs
._set
_pool
_namespace
(kwargs
['cluster_id'])
59 nfs
._set
_cluster
_id
(kwargs
['cluster_id'])
60 return func(nfs
, *args
, **kwargs
)
61 return set_pool_ns_clusterid
64 class FSExportError(Exception):
65 def __init__(self
, err_msg
, errno
=-errno
.EINVAL
):
67 self
.err_msg
= err_msg
72 class GaneshaConfParser(object):
73 def __init__(self
, raw_config
):
76 self
.clean_config(raw_config
)
78 def clean_config(self
, raw_config
):
79 for line
in raw_config
.split("\n"):
81 if line
.startswith("%"):
84 def remove_whitespaces_quotes(self
):
85 if self
.text
.startswith("%url"):
86 self
.text
= self
.text
.replace('"', "")
88 self
.text
= "".join(self
.text
.split())
91 return self
.text
[self
.pos
:]
93 def parse_block_name(self
):
94 idx
= self
.stream().find('{')
96 raise Exception("Cannot find block name")
97 block_name
= self
.stream()[:idx
]
101 def parse_block_or_section(self
):
102 if self
.stream().startswith("%url "):
105 idx
= self
.stream().find('\n')
107 value
= self
.stream()
108 self
.pos
+= len(value
)
110 value
= self
.stream()[:idx
]
112 block_dict
= {'block_name': '%url', 'value': value
}
115 block_dict
= {'block_name': self
.parse_block_name().upper()}
116 self
.parse_block_body(block_dict
)
117 if self
.stream()[0] != '}':
118 raise Exception("No closing bracket '}' found at the end of block")
122 def parse_parameter_value(self
, raw_value
):
123 if raw_value
.find(',') != -1:
124 return [self
.parse_parameter_value(v
.strip())
125 for v
in raw_value
.split(',')]
127 return int(raw_value
)
129 if raw_value
== "true":
131 if raw_value
== "false":
133 if raw_value
.find('"') == 0:
134 return raw_value
[1:-1]
137 def parse_stanza(self
, block_dict
):
138 equal_idx
= self
.stream().find('=')
140 raise Exception("Malformed stanza: no equal symbol found.")
141 semicolon_idx
= self
.stream().find(';')
142 parameter_name
= self
.stream()[:equal_idx
].lower()
143 parameter_value
= self
.stream()[equal_idx
+1:semicolon_idx
]
144 block_dict
[parameter_name
] = self
.parse_parameter_value(parameter_value
)
145 self
.pos
+= semicolon_idx
+1
147 def parse_block_body(self
, block_dict
):
149 if self
.stream().find('}') == 0:
154 semicolon_idx
= self
.stream().find(';')
155 lbracket_idx
= self
.stream().find('{')
156 is_semicolon
= (semicolon_idx
!= -1)
157 is_lbracket
= (lbracket_idx
!= -1)
158 is_semicolon_lt_lbracket
= (semicolon_idx
< lbracket_idx
)
160 if is_semicolon
and ((is_lbracket
and is_semicolon_lt_lbracket
) or not is_lbracket
):
161 self
.parse_stanza(block_dict
)
162 elif is_lbracket
and ((is_semicolon
and not is_semicolon_lt_lbracket
) or
164 if '_blocks_' not in block_dict
:
165 block_dict
['_blocks_'] = []
166 block_dict
['_blocks_'].append(self
.parse_block_or_section())
168 raise Exception("Malformed stanza: no semicolon found.")
170 if last_pos
== self
.pos
:
171 raise Exception("Infinite loop while parsing block content")
174 self
.remove_whitespaces_quotes()
177 blocks
.append(self
.parse_block_or_section())
181 def _indentation(depth
, size
=4):
183 for _
in range(0, depth
*size
):
188 def write_block_body(block
, depth
=0):
189 def format_val(key
, val
):
190 if isinstance(val
, list):
191 return ', '.join([format_val(key
, v
) for v
in val
])
192 if isinstance(val
, bool):
193 return str(val
).lower()
194 if isinstance(val
, int) or (block
['block_name'] == 'CLIENT'
195 and key
== 'clients'):
196 return '{}'.format(val
)
197 return '"{}"'.format(val
)
200 for key
, val
in block
.items():
201 if key
== 'block_name':
203 elif key
== '_blocks_':
205 conf_str
+= GaneshaConfParser
.write_block(blo
, depth
)
207 conf_str
+= GaneshaConfParser
._indentation
(depth
)
208 conf_str
+= '{} = {};\n'.format(key
, format_val(key
, val
))
212 def write_block(block
, depth
=0):
213 if block
['block_name'] == "%url":
214 return '%url "{}"\n\n'.format(block
['value'])
217 conf_str
+= GaneshaConfParser
._indentation
(depth
)
218 conf_str
+= format(block
['block_name'])
220 conf_str
+= GaneshaConfParser
.write_block_body(block
, depth
+1)
221 conf_str
+= GaneshaConfParser
._indentation
(depth
)
227 def __init__(self
, name
, user_id
=None, fs_name
=None, sec_label_xattr
=None,
230 self
.fs_name
= fs_name
231 self
.user_id
= user_id
232 self
.sec_label_xattr
= sec_label_xattr
233 self
.cephx_key
= cephx_key
236 def from_fsal_block(cls
, fsal_block
):
237 return cls(fsal_block
['name'],
238 fsal_block
.get('user_id', None),
239 fsal_block
.get('filesystem', None),
240 fsal_block
.get('sec_label_xattr', None),
241 fsal_block
.get('secret_access_key', None))
243 def to_fsal_block(self
):
245 'block_name': 'FSAL',
249 result
['user_id'] = self
.user_id
251 result
['filesystem'] = self
.fs_name
252 if self
.sec_label_xattr
:
253 result
['sec_label_xattr'] = self
.sec_label_xattr
255 result
['secret_access_key'] = self
.cephx_key
259 def from_dict(cls
, fsal_dict
):
260 return cls(fsal_dict
['name'], fsal_dict
['user_id'],
261 fsal_dict
['fs_name'], fsal_dict
['sec_label_xattr'], None)
266 'user_id': self
.user_id
,
267 'fs_name': self
.fs_name
,
268 'sec_label_xattr': self
.sec_label_xattr
272 class Client(object):
273 def __init__(self
, addresses
, access_type
=None, squash
=None):
274 self
.addresses
= addresses
275 self
.access_type
= access_type
279 def from_client_block(cls
, client_block
):
280 addresses
= client_block
['clients']
281 if not isinstance(addresses
, list):
282 addresses
= [addresses
]
283 return cls(addresses
,
284 client_block
.get('access_type', None),
285 client_block
.get('squash', None))
287 def to_client_block(self
):
289 'block_name': 'CLIENT',
290 'clients': self
.addresses
,
293 result
['access_type'] = self
.access_type
295 result
['squash'] = self
.squash
299 def from_dict(cls
, client_dict
):
300 return cls(client_dict
['addresses'], client_dict
['access_type'],
301 client_dict
['squash'])
305 'addresses': self
.addresses
,
306 'access_type': self
.access_type
,
307 'squash': self
.squash
312 def __init__(self
, mgr
, namespace
):
314 self
.pool
= POOL_NAME
315 self
.namespace
= namespace
317 def _make_rados_url(self
, obj
):
318 return "rados://{}/{}/{}".format(self
.pool
, self
.namespace
, obj
)
320 def _create_url_block(self
, obj_name
):
321 return {'block_name': '%url', 'value': self
._make
_rados
_url
(obj_name
)}
323 def write_obj(self
, conf_block
, obj
, config_obj
=''):
325 conf_block
= GaneshaConfParser
.write_block(conf_block
)
327 with self
.mgr
.rados
.open_ioctx(self
.pool
) as ioctx
:
328 ioctx
.set_namespace(self
.namespace
)
329 ioctx
.write_full(obj
, conf_block
.encode('utf-8'))
331 # Return after creating empty common config object
333 log
.debug("write configuration into rados object "
334 f
"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")
336 # Add created obj url to common config obj
337 ioctx
.append(config_obj
, GaneshaConfParser
.write_block(
338 self
._create
_url
_block
(obj
)).encode('utf-8'))
339 FSExport
._check
_rados
_notify
(ioctx
, config_obj
)
340 log
.debug(f
"Added {obj} url to {config_obj}")
342 def update_obj(self
, conf_block
, obj
, config_obj
):
343 with self
.mgr
.rados
.open_ioctx(self
.pool
) as ioctx
:
344 ioctx
.set_namespace(self
.namespace
)
345 ioctx
.write_full(obj
, conf_block
.encode('utf-8'))
346 log
.debug("write configuration into rados object "
347 f
"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")
348 FSExport
._check
_rados
_notify
(ioctx
, config_obj
)
349 log
.debug(f
"Update export {obj} in {config_obj}")
351 def remove_obj(self
, obj
, config_obj
):
352 with self
.mgr
.rados
.open_ioctx(self
.pool
) as ioctx
:
353 ioctx
.set_namespace(self
.namespace
)
354 export_urls
= ioctx
.read(config_obj
)
355 url
= '%url "{}"\n\n'.format(self
._make
_rados
_url
(obj
))
356 export_urls
= export_urls
.replace(url
.encode('utf-8'), b
'')
357 ioctx
.remove_object(obj
)
358 ioctx
.write_full(config_obj
, export_urls
)
359 FSExport
._check
_rados
_notify
(ioctx
, config_obj
)
360 log
.debug("Object deleted: {}".format(url
))
362 def remove_all_obj(self
):
363 with self
.mgr
.rados
.open_ioctx(self
.pool
) as ioctx
:
364 ioctx
.set_namespace(self
.namespace
)
365 for obj
in ioctx
.list_objects():
368 def check_user_config(self
):
369 with self
.mgr
.rados
.open_ioctx(self
.pool
) as ioctx
:
370 ioctx
.set_namespace(self
.namespace
)
371 for obj
in ioctx
.list_objects():
372 if obj
.key
.startswith("userconf-nfs"):
377 class Export(object):
378 # pylint: disable=R0902
379 def __init__(self
, export_id
, path
, cluster_id
, pseudo
, access_type
, squash
, security_label
,
380 protocols
, transports
, fsal
, clients
=None):
381 self
.export_id
= export_id
384 self
.cluster_id
= cluster_id
386 self
.access_type
= access_type
388 self
.attr_expiration_time
= 0
389 self
.security_label
= security_label
390 self
.protocols
= protocols
391 self
.transports
= transports
392 self
.clients
= clients
395 def from_export_block(cls
, export_block
, cluster_id
):
396 log
.debug("parsing export block: %s", export_block
)
398 fsal_block
= [b
for b
in export_block
['_blocks_']
399 if b
['block_name'] == "FSAL"]
401 client_blocks
= [b
for b
in export_block
['_blocks_']
402 if b
['block_name'] == "CLIENT"]
404 return cls(export_block
['export_id'],
405 export_block
['path'],
407 export_block
['pseudo'],
408 export_block
['access_type'],
409 export_block
['squash'],
410 export_block
['security_label'],
411 export_block
['protocols'],
412 export_block
['transports'],
413 CephFSFSal
.from_fsal_block(fsal_block
[0]),
414 [Client
.from_client_block(client
)
415 for client
in client_blocks
])
417 def to_export_block(self
):
418 # pylint: disable=too-many-branches
420 'block_name': 'EXPORT',
421 'export_id': self
.export_id
,
423 'pseudo': self
.pseudo
,
424 'access_type': self
.access_type
,
425 'squash': self
.squash
,
426 'attr_expiration_time': self
.attr_expiration_time
,
427 'security_label': self
.security_label
,
428 'protocols': self
.protocols
,
429 'transports': self
.transports
,
431 result
['_blocks_'] = [self
.fsal
.to_fsal_block()]
432 result
['_blocks_'].extend([client
.to_client_block()
433 for client
in self
.clients
])
437 def from_dict(cls
, export_id
, ex_dict
):
438 return cls(export_id
,
440 ex_dict
['cluster_id'],
442 ex_dict
.get('access_type', 'R'),
443 ex_dict
.get('squash', 'no_root_squash'),
444 ex_dict
.get('security_label', True),
445 ex_dict
.get('protocols', [4]),
446 ex_dict
.get('transports', ['TCP']),
447 CephFSFSal
.from_dict(ex_dict
['fsal']),
448 [Client
.from_dict(client
) for client
in ex_dict
['clients']])
452 'export_id': self
.export_id
,
454 'cluster_id': self
.cluster_id
,
455 'pseudo': self
.pseudo
,
456 'access_type': self
.access_type
,
457 'squash': self
.squash
,
458 'security_label': self
.security_label
,
459 'protocols': sorted([p
for p
in self
.protocols
]),
460 'transports': sorted([t
for t
in self
.transports
]),
461 'fsal': self
.fsal
.to_dict(),
462 'clients': [client
.to_dict() for client
in self
.clients
]
466 class FSExport(object):
467 def __init__(self
, mgr
, namespace
=None):
469 self
.rados_pool
= POOL_NAME
470 self
.rados_namespace
= namespace
474 def _check_rados_notify(ioctx
, obj
):
478 log
.exception(f
"Ganesha timed out")
482 if self
._exports
is None:
484 log
.info("Begin export parsing")
485 for cluster_id
in available_clusters(self
.mgr
):
486 self
.export_conf_objs
= [] # type: List[Export]
487 self
._read
_raw
_config
(cluster_id
)
488 self
.exports
[cluster_id
] = self
.export_conf_objs
489 log
.info(f
"Exports parsed successfully {self.exports.items()}")
492 def _fetch_export(self
, pseudo_path
):
494 for ex
in self
.exports
[self
.rados_namespace
]:
495 if ex
.pseudo
== pseudo_path
:
500 def _create_user_key(self
, entity
, path
, fs_name
, fs_ro
):
501 osd_cap
= 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
502 self
.rados_pool
, self
.rados_namespace
, fs_name
)
503 access_type
= 'r' if fs_ro
else 'rw'
505 ret
, out
, err
= self
.mgr
.check_mon_command({
506 'prefix': 'auth get-or-create',
507 'entity': 'client.{}'.format(entity
),
508 'caps': ['mon', 'allow r', 'osd', osd_cap
, 'mds', 'allow {} path={}'.format(
513 json_res
= json
.loads(out
)
514 log
.info("Export user created is {}".format(json_res
[0]['entity']))
515 return json_res
[0]['entity'], json_res
[0]['key']
517 def _delete_user(self
, entity
):
518 self
.mgr
.check_mon_command({
520 'entity': 'client.{}'.format(entity
),
522 log
.info(f
"Export user deleted is {entity}")
524 def _gen_export_id(self
):
525 exports
= sorted([ex
.export_id
for ex
in self
.exports
[self
.rados_namespace
]])
534 def _read_raw_config(self
, rados_namespace
):
535 with self
.mgr
.rados
.open_ioctx(self
.rados_pool
) as ioctx
:
536 ioctx
.set_namespace(rados_namespace
)
537 for obj
in ioctx
.list_objects():
538 if obj
.key
.startswith("export-"):
540 raw_config
= obj
.read(size
)
541 raw_config
= raw_config
.decode("utf-8")
542 log
.debug("read export configuration from rados "
543 "object %s/%s/%s:\n%s", self
.rados_pool
,
544 rados_namespace
, obj
.key
, raw_config
)
545 self
.export_conf_objs
.append(Export
.from_export_block(
546 GaneshaConfParser(raw_config
).parse()[0], rados_namespace
))
548 def _save_export(self
, export
):
549 self
.exports
[self
.rados_namespace
].append(export
)
550 NFSRados(self
.mgr
, self
.rados_namespace
).write_obj(export
.to_export_block(),
551 f
'export-{export.export_id}', f
'conf-nfs.{export.cluster_id}')
553 def _delete_export(self
, cluster_id
, pseudo_path
, export_obj
=None):
558 export
= self
._fetch
_export
(pseudo_path
)
562 NFSRados(self
.mgr
, self
.rados_namespace
).remove_obj(
563 f
'export-{export.export_id}', f
'conf-nfs.{cluster_id}')
564 self
.exports
[cluster_id
].remove(export
)
565 self
._delete
_user
(export
.fsal
.user_id
)
566 if not self
.exports
[cluster_id
]:
567 del self
.exports
[cluster_id
]
568 return 0, "Successfully deleted export", ""
569 return 0, "", "Export does not exist"
570 except Exception as e
:
571 log
.exception(f
"Failed to delete {pseudo_path} export for {cluster_id}")
572 return getattr(e
, 'errno', -1), "", str(e
)
574 def format_path(self
, path
):
576 path
= normpath(path
.strip())
581 def check_fs(self
, fs_name
):
582 fs_map
= self
.mgr
.get('fs_map')
583 return fs_name
in [fs
['mdsmap']['fs_name'] for fs
in fs_map
['filesystems']]
585 @export_cluster_checker
586 def create_export(self
, fs_name
, cluster_id
, pseudo_path
, read_only
, path
):
588 if not self
.check_fs(fs_name
):
589 return -errno
.ENOENT
, "", f
"filesystem {fs_name} not found"
591 pseudo_path
= self
.format_path(pseudo_path
)
592 self
._validate
_pseudo
_path
(pseudo_path
)
594 if cluster_id
not in self
.exports
:
595 self
.exports
[cluster_id
] = []
597 if not self
._fetch
_export
(pseudo_path
):
598 ex_id
= self
._gen
_export
_id
()
599 user_id
= f
"{cluster_id}{ex_id}"
600 user_out
, key
= self
._create
_user
_key
(user_id
, path
, fs_name
, read_only
)
605 'path': self
.format_path(path
),
606 'pseudo': pseudo_path
,
607 'cluster_id': cluster_id
,
608 'access_type': access_type
,
609 'fsal': {"name": "CEPH", "user_id": user_id
,
610 "fs_name": fs_name
, "sec_label_xattr": ""},
613 export
= Export
.from_dict(ex_id
, ex_dict
)
614 export
.fsal
.cephx_key
= key
615 self
._save
_export
(export
)
620 "cluster": cluster_id
,
623 return (0, json
.dumps(result
, indent
=4), '')
624 return 0, "", "Export already exists"
625 except Exception as e
:
626 log
.exception(f
"Failed to create {pseudo_path} export for {cluster_id}")
627 return getattr(e
, 'errno', -1), "", str(e
)
629 @export_cluster_checker
630 def delete_export(self
, cluster_id
, pseudo_path
):
631 return self
._delete
_export
(cluster_id
, pseudo_path
)
633 def delete_all_exports(self
, cluster_id
):
635 export_list
= list(self
.exports
[cluster_id
])
637 log
.info("No exports to delete")
639 self
.rados_namespace
= cluster_id
640 for export
in export_list
:
641 ret
, out
, err
= self
._delete
_export
(cluster_id
=cluster_id
, pseudo_path
=None,
644 raise Exception(f
"Failed to delete exports: {err} and {ret}")
645 log
.info(f
"All exports successfully deleted for cluster id: {cluster_id}")
647 @export_cluster_checker
648 def list_exports(self
, cluster_id
, detailed
):
651 result
= [export
.to_dict() for export
in self
.exports
[cluster_id
]]
653 result
= [export
.pseudo
for export
in self
.exports
[cluster_id
]]
654 return 0, json
.dumps(result
, indent
=2), ''
656 log
.warning(f
"No exports to list for {cluster_id}")
658 except Exception as e
:
659 log
.exception(f
"Failed to list exports for {cluster_id}")
660 return getattr(e
, 'errno', -1), "", str(e
)
662 @export_cluster_checker
663 def get_export(self
, cluster_id
, pseudo_path
):
665 export
= self
._fetch
_export
(pseudo_path
)
667 return 0, json
.dumps(export
.to_dict(), indent
=2), ''
668 log
.warning(f
"No {pseudo_path} export to show for {cluster_id}")
670 except Exception as e
:
671 log
.exception(f
"Failed to get {pseudo_path} export for {cluster_id}")
672 return getattr(e
, 'errno', -1), "", str(e
)
674 def _validate_pseudo_path(self
, path
):
675 if not isabs(path
) or path
== "/":
676 raise FSExportError(f
"pseudo path {path} is invalid. "\
677 "It should be an absolute path and it cannot be just '/'.")
679 def _validate_squash(self
, squash
):
680 valid_squash_ls
= ["root", "root_squash", "rootsquash", "rootid", "root_id_squash",
681 "rootidsquash", "all", "all_squash", "allsquash", "all_anomnymous", "allanonymous",
682 "no_root_squash", "none", "noidsquash"]
683 if squash
not in valid_squash_ls
:
684 raise FSExportError(f
"squash {squash} not in valid list {valid_squash_ls}")
686 def _validate_security_label(self
, label
):
687 if not isinstance(label
, bool):
688 raise FSExportError('Only boolean values allowed')
690 def _validate_protocols(self
, proto
):
693 raise FSExportError(f
"Invalid protocol {p}")
695 log
.warning("NFS V3 is an old version, it might not work")
697 def _validate_transport(self
, transport
):
698 valid_transport
= ["UDP", "TCP"]
699 for trans
in transport
:
700 if trans
.upper() not in valid_transport
:
701 raise FSExportError(f
'{trans} is not a valid transport protocol')
703 def _validate_access_type(self
, access_type
):
704 valid_ones
= ['RW', 'RO']
705 if access_type
not in valid_ones
:
706 raise FSExportError(f
'{access_type} is invalid, valid access type are {valid_ones}')
708 def _validate_fsal(self
, old
, new
):
709 if old
.name
!= new
['name']:
710 raise FSExportError('FSAL name change not allowed')
711 if old
.user_id
!= new
['user_id']:
712 raise FSExportError('User ID modification is not allowed')
713 if new
['sec_label_xattr']:
714 raise FSExportError('Security label xattr cannot be changed')
715 if old
.fs_name
!= new
['fs_name']:
716 if not self
.check_fs(new
['fs_name']):
717 raise FSExportError(f
"filesystem {new['fs_name']} not found")
720 def _validate_client(self
, client
):
721 self
._validate
_access
_type
(client
['access_type'])
722 self
._validate
_squash
(client
['squash'])
724 def _validate_clients(self
, clients_ls
):
725 for client
in clients_ls
:
726 self
._validate
_client
(client
)
728 def _fetch_export_obj(self
, ex_id
):
730 with self
.mgr
.rados
.open_ioctx(self
.rados_pool
) as ioctx
:
731 ioctx
.set_namespace(self
.rados_namespace
)
732 export
= Export
.from_export_block(GaneshaConfParser(ioctx
.read(f
"export-{ex_id}"
733 ).decode("utf-8")).parse()[0], self
.rados_namespace
)
735 except ObjectNotFound
:
736 log
.exception(f
"Export ID: {ex_id} not found")
738 def _validate_export(self
, new_export_dict
):
739 if new_export_dict
['cluster_id'] not in available_clusters(self
.mgr
):
740 raise FSExportError(f
"Cluster {new_export_dict['cluster_id']} does not exists",
742 export
= self
._fetch
_export
(new_export_dict
['pseudo'])
745 # Check if export id matches
746 if export
.export_id
!= new_export_dict
['export_id']:
747 raise FSExportError('Export ID changed, Cannot update export')
749 # Fetch export based on export id object
750 export
= self
._fetch
_export
_obj
(new_export_dict
['export_id'])
752 raise FSExportError('Export does not exist')
754 new_export_dict
['pseudo'] = self
.format_path(new_export_dict
['pseudo'])
755 self
._validate
_pseudo
_path
(new_export_dict
['pseudo'])
756 log
.debug(f
"Pseudo path has changed from {export.pseudo} to "\
757 f
"{new_export_dict['pseudo']}")
758 # Check if squash changed
759 if export
.squash
!= new_export_dict
['squash']:
760 if new_export_dict
['squash']:
761 new_export_dict
['squash'] = new_export_dict
['squash'].lower()
762 self
._validate
_squash
(new_export_dict
['squash'])
763 log
.debug(f
"squash has changed from {export.squash} to {new_export_dict['squash']}")
764 # Security label check
765 if export
.security_label
!= new_export_dict
['security_label']:
766 self
._validate
_security
_label
(new_export_dict
['security_label'])
768 if export
.protocols
!= new_export_dict
['protocols']:
769 self
._validate
_protocols
(new_export_dict
['protocols'])
771 if export
.transports
!= new_export_dict
['transports']:
772 self
._validate
_transport
(new_export_dict
['transports'])
774 if export
.path
!= new_export_dict
['path']:
775 new_export_dict
['path'] = self
.format_path(new_export_dict
['path'])
776 out_msg
= 'update caps'
778 if export
.access_type
!= new_export_dict
['access_type']:
779 self
._validate
_access
_type
(new_export_dict
['access_type'])
781 if export
.fsal
!= new_export_dict
['fsal']:
782 ret
= self
._validate
_fsal
(export
.fsal
, new_export_dict
['fsal'])
783 if ret
== 1 and not out_msg
:
784 out_msg
= 'update caps'
786 if export
.clients
!= new_export_dict
['clients']:
787 self
._validate
_clients
(new_export_dict
['clients'])
788 log
.debug(f
'Validation succeeded for Export {export.pseudo}')
789 return export
, out_msg
791 def _update_user_id(self
, path
, access_type
, fs_name
, user_id
):
792 osd_cap
= 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
793 self
.rados_pool
, self
.rados_namespace
, fs_name
)
794 access_type
= 'r' if access_type
== 'RO' else 'rw'
796 self
.mgr
.check_mon_command({
797 'prefix': 'auth caps',
798 'entity': f
'client.{user_id}',
799 'caps': ['mon', 'allow r', 'osd', osd_cap
, 'mds', 'allow {} path={}'.format(
803 log
.info(f
"Export user updated {user_id}")
805 def _update_export(self
, export
):
806 self
.exports
[self
.rados_namespace
].append(export
)
807 NFSRados(self
.mgr
, self
.rados_namespace
).update_obj(
808 GaneshaConfParser
.write_block(export
.to_export_block()),
809 f
'export-{export.export_id}', f
'conf-nfs.{export.cluster_id}')
811 def update_export(self
, export_config
):
813 if not export_config
:
814 return -errno
.EINVAL
, "", "Empty Config!!"
815 update_export
= json
.loads(export_config
)
816 old_export
, update_user_caps
= self
._validate
_export
(update_export
)
818 self
._update
_user
_id
(update_export
['path'], update_export
['access_type'],
819 update_export
['fsal']['fs_name'], update_export
['fsal']['user_id'])
820 update_export
= Export
.from_dict(update_export
['export_id'], update_export
)
821 update_export
.fsal
.cephx_key
= old_export
.fsal
.cephx_key
822 self
._update
_export
(update_export
)
823 export_ls
= self
.exports
[self
.rados_namespace
]
824 if old_export
not in export_ls
:
825 # This happens when export is fetched by ID
826 old_export
= self
._fetch
_export
(old_export
.pseudo
)
827 export_ls
.remove(old_export
)
828 restart_nfs_service(self
.mgr
, update_export
.cluster_id
)
829 return 0, "Successfully updated export", ""
830 except NotImplementedError:
831 return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
832 except Exception as e
:
833 return getattr(e
, 'errno', -1), '', f
'Failed to update export: {e}'
837 def __init__(self
, mgr
):
838 self
.pool_name
= POOL_NAME
842 def _set_cluster_id(self
, cluster_id
):
843 self
.cluster_id
= cluster_id
845 def _set_pool_namespace(self
, cluster_id
):
846 self
.pool_ns
= cluster_id
848 def _get_common_conf_obj_name(self
):
849 return f
'conf-nfs.{self.cluster_id}'
851 def _get_user_conf_obj_name(self
):
852 return f
'userconf-nfs.{self.cluster_id}'
854 def _call_orch_apply_nfs(self
, placement
):
855 spec
= NFSServiceSpec(service_type
='nfs', service_id
=self
.cluster_id
,
856 pool
=self
.pool_name
, namespace
=self
.pool_ns
,
857 placement
=PlacementSpec
.from_string(placement
))
858 completion
= self
.mgr
.apply_nfs(spec
)
859 orchestrator
.raise_if_exception(completion
)
861 def create_empty_rados_obj(self
):
862 common_conf
= self
._get
_common
_conf
_obj
_name
()
863 NFSRados(self
.mgr
, self
.pool_ns
).write_obj('', self
._get
_common
_conf
_obj
_name
())
864 log
.info(f
"Created empty object:{common_conf}")
866 def delete_config_obj(self
):
867 NFSRados(self
.mgr
, self
.pool_ns
).remove_all_obj()
868 log
.info(f
"Deleted {self._get_common_conf_obj_name()} object and all objects in "
872 def create_nfs_cluster(self
, export_type
, cluster_id
, placement
):
873 if export_type
!= 'cephfs':
874 return -errno
.EINVAL
, "", f
"Invalid export type: {export_type}"
876 pool_list
= [p
['pool_name'] for p
in self
.mgr
.get_osdmap().dump().get('pools', [])]
878 if self
.pool_name
not in pool_list
:
879 r
, out
, err
= create_pool(self
.mgr
, self
.pool_name
)
882 log
.info(f
"Pool Status: {out}")
884 self
.mgr
.check_mon_command({'prefix': 'osd pool application enable',
885 'pool': self
.pool_name
, 'app': 'nfs'})
887 self
.create_empty_rados_obj()
889 if cluster_id
not in available_clusters(self
.mgr
):
890 self
._call
_orch
_apply
_nfs
(placement
)
891 return 0, "NFS Cluster Created Successfully", ""
892 return 0, "", f
"{cluster_id} cluster already exists"
893 except Exception as e
:
894 log
.exception(f
"NFS Cluster {cluster_id} could not be created")
895 return getattr(e
, 'errno', -1), "", str(e
)
898 def update_nfs_cluster(self
, cluster_id
, placement
):
900 if cluster_id
in available_clusters(self
.mgr
):
901 self
._call
_orch
_apply
_nfs
(placement
)
902 return 0, "NFS Cluster Updated Successfully", ""
903 return -errno
.ENOENT
, "", "Cluster does not exist"
904 except Exception as e
:
905 log
.exception(f
"NFS Cluster {cluster_id} could not be updated")
906 return getattr(e
, 'errno', -1), "", str(e
)
909 def delete_nfs_cluster(self
, cluster_id
):
911 cluster_list
= available_clusters(self
.mgr
)
912 if cluster_id
in cluster_list
:
913 self
.mgr
.fs_export
.delete_all_exports(cluster_id
)
914 completion
= self
.mgr
.remove_service('nfs.' + self
.cluster_id
)
915 orchestrator
.raise_if_exception(completion
)
916 self
.delete_config_obj()
917 return 0, "NFS Cluster Deleted Successfully", ""
918 return 0, "", "Cluster does not exist"
919 except Exception as e
:
920 log
.exception(f
"Failed to delete NFS Cluster {cluster_id}")
921 return getattr(e
, 'errno', -1), "", str(e
)
923 def list_nfs_cluster(self
):
925 return 0, '\n'.join(available_clusters(self
.mgr
)), ""
926 except Exception as e
:
927 log
.exception("Failed to list NFS Cluster")
928 return getattr(e
, 'errno', -1), "", str(e
)
930 def _show_nfs_cluster_info(self
, cluster_id
):
931 self
._set
_cluster
_id
(cluster_id
)
932 completion
= self
.mgr
.list_daemons(daemon_type
='nfs')
933 orchestrator
.raise_if_exception(completion
)
935 # Here completion.result is a list DaemonDescription objects
936 for cluster
in completion
.result
:
937 if self
.cluster_id
== cluster
.service_id():
939 getaddrinfo sample output: [(<AddressFamily.AF_INET: 2>,
940 <SocketKind.SOCK_STREAM: 1>, 6, 'xyz', ('172.217.166.98',2049)),
941 (<AddressFamily.AF_INET6: 10>, <SocketKind.SOCK_STREAM: 1>, 6, '',
942 ('2404:6800:4009:80d::200e', 2049, 0, 0))]
946 "hostname": cluster
.hostname
,
947 "ip": list(set([ip
[4][0] for ip
in socket
.getaddrinfo(
948 cluster
.hostname
, 2049, flags
=socket
.AI_CANONNAME
,
949 type=socket
.SOCK_STREAM
)])),
950 "port": 2049 # Default ganesha port
952 except socket
.gaierror
:
956 def show_nfs_cluster_info(self
, cluster_id
=None):
961 cluster_ls
= [cluster_id
]
963 cluster_ls
= available_clusters(self
.mgr
)
965 for cluster_id
in cluster_ls
:
966 res
= self
._show
_nfs
_cluster
_info
(cluster_id
)
968 info_res
[cluster_id
] = res
969 return (0, json
.dumps(info_res
, indent
=4), '')
970 except Exception as e
:
971 log
.exception(f
"Failed to show info for cluster")
972 return getattr(e
, 'errno', -1), "", str(e
)
975 def set_nfs_cluster_config(self
, cluster_id
, nfs_config
):
978 return -errno
.EINVAL
, "", "Empty Config!!"
979 if cluster_id
in available_clusters(self
.mgr
):
980 rados_obj
= NFSRados(self
.mgr
, self
.pool_ns
)
981 if rados_obj
.check_user_config():
982 return 0, "", "NFS-Ganesha User Config already exists"
983 rados_obj
.write_obj(nfs_config
, self
._get
_user
_conf
_obj
_name
(),
984 self
._get
_common
_conf
_obj
_name
())
985 restart_nfs_service(self
.mgr
, cluster_id
)
986 return 0, "NFS-Ganesha Config Set Successfully", ""
987 return -errno
.ENOENT
, "", "Cluster does not exist"
988 except NotImplementedError:
989 return 0, "NFS-Ganesha Config Added Successfully (Manual Restart of NFS PODS required)", ""
990 except Exception as e
:
991 log
.exception(f
"Setting NFS-Ganesha Config failed for {cluster_id}")
992 return getattr(e
, 'errno', -1), "", str(e
)
995 def reset_nfs_cluster_config(self
, cluster_id
):
997 if cluster_id
in available_clusters(self
.mgr
):
998 rados_obj
= NFSRados(self
.mgr
, self
.pool_ns
)
999 if not rados_obj
.check_user_config():
1000 return 0, "", "NFS-Ganesha User Config does not exist"
1001 rados_obj
.remove_obj(self
._get
_user
_conf
_obj
_name
(),
1002 self
._get
_common
_conf
_obj
_name
())
1003 restart_nfs_service(self
.mgr
, cluster_id
)
1004 return 0, "NFS-Ganesha Config Reset Successfully", ""
1005 return -errno
.ENOENT
, "", "Cluster does not exist"
1006 except NotImplementedError:
1007 return 0, "NFS-Ganesha Config Removed Successfully (Manual Restart of NFS PODS required)", ""
1008 except Exception as e
:
1009 log
.exception(f
"Resetting NFS-Ganesha Config failed for {cluster_id}")
1010 return getattr(e
, 'errno', -1), "", str(e
)