# Module prelude for the volumes/fs NFS-Ganesha manager plugin.
# NOTE(review): reconstructed from a line-mangled dump; the stdlib imports for
# errno/json/logging/socket/orchestrator were missing but are required by the
# code below — confirm ordering against upstream.
import errno
import json
import logging
import socket
from os.path import isabs, normpath
from typing import List

from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec
from rados import TimedOut

import orchestrator

from .fs_util import create_pool

# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)

# RADOS pool that backs all NFS-Ganesha configuration objects.
POOL_NAME = 'nfs-ganesha'
def available_clusters(mgr):
    """
    Return the list of available NFS cluster ids.

    Queries the orchestrator for deployed ``nfs`` services and strips the
    leading 'ganesha-' prefix that cephadm adds to the service id.

    completion.result value:
    <ServiceDescription of <NFSServiceSpec for service_name=nfs.ganesha-vstart>>
    return value: ['ganesha-vstart'] -> ['vstart']

    :param mgr: the active mgr module (provides orchestrator access)
    :return: list of cluster id strings (may be empty)
    """
    # TODO check cephadm cluster list with rados pool conf objects
    completion = mgr.describe_service(service_type='nfs')
    mgr._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    # Only the first 'ganesha-' occurrence is stripped; skip services with an
    # empty service_id.
    return [cluster.spec.service_id.replace('ganesha-', '', 1)
            for cluster in completion.result
            if cluster.spec.service_id]
def export_cluster_checker(func):
    """
    Decorator for FSExport methods taking a ``cluster_id`` keyword argument.

    Rejects the call with -ENOENT when the cluster does not exist, otherwise
    stores the cluster id as the rados namespace on the FSExport instance
    before delegating to the wrapped method.
    """
    def cluster_check(fs_export, *args, **kwargs):
        """
        This method checks if cluster exists and sets rados namespace.
        """
        if kwargs['cluster_id'] not in available_clusters(fs_export.mgr):
            return -errno.ENOENT, "", "Cluster does not exists"
        fs_export.rados_namespace = kwargs['cluster_id']
        return func(fs_export, *args, **kwargs)
    # NOTE(review): the trailing return was missing in the mangled dump; a
    # decorator must return its wrapper for @export_cluster_checker to work.
    return cluster_check
def cluster_setter(func):
    """
    Decorator for NFSCluster methods taking a ``cluster_id`` keyword argument.

    Sets both the rados pool namespace and the internal cluster id on the
    NFSCluster instance before delegating to the wrapped method.
    """
    def set_pool_ns_clusterid(nfs, *args, **kwargs):
        nfs._set_pool_namespace(kwargs['cluster_id'])
        nfs._set_cluster_id(kwargs['cluster_id'])
        return func(nfs, *args, **kwargs)
    return set_pool_ns_clusterid
class GaneshaConfParser(object):
    """
    Parser/serializer for NFS-Ganesha configuration text.

    Parses config text into a list of nested dicts (one per block), where each
    dict carries 'block_name', its parameters as lowercase keys, and nested
    blocks under '_blocks_'.  The write_* static methods perform the inverse.
    """

    def __init__(self, raw_config):
        # Current read offset into the flattened config text.
        self.pos = 0
        self.text = ""
        self.clean_config(raw_config)

    def clean_config(self, raw_config):
        """Flatten the config onto one line, keeping '%' directives on their own lines."""
        for line in raw_config.split("\n"):
            self.text += line
            if line.startswith("%"):
                # '%url' lines are newline-terminated so parse can find them.
                self.text += "\n"

    def remove_whitespaces_quotes(self):
        if self.text.startswith("%url"):
            # Keep spacing for URL lines; only drop the quotes.
            self.text = self.text.replace('"', "")
        else:
            # Strip all whitespace so parsing can work on raw tokens.
            self.text = "".join(self.text.split())

    def stream(self):
        """Return the not-yet-consumed remainder of the text."""
        return self.text[self.pos:]

    def parse_block_name(self):
        idx = self.stream().find('{')
        if idx == -1:
            raise Exception("Cannot find block name")
        block_name = self.stream()[:idx]
        self.pos += idx + 1
        return block_name

    def parse_block_or_section(self):
        if self.stream().startswith("%url "):
            # section line: consume the '%url ' prefix then read up to newline
            self.pos += 5
            idx = self.stream().find('\n')
            if idx == -1:
                value = self.stream()
                self.pos += len(value)
            else:
                value = self.stream()[:idx]
                self.pos += idx + 1
            block_dict = {'block_name': '%url', 'value': value}
            return block_dict

        block_dict = {'block_name': self.parse_block_name().upper()}
        self.parse_block_body(block_dict)
        if self.stream()[0] != '}':
            raise Exception("No closing bracket '}' found at the end of block")
        self.pos += 1
        return block_dict

    def parse_parameter_value(self, raw_value):
        """Convert a raw stanza value to list/int/bool/str as appropriate."""
        if raw_value.find(',') != -1:
            return [self.parse_parameter_value(v.strip())
                    for v in raw_value.split(',')]
        try:
            return int(raw_value)
        except ValueError:
            pass
        if raw_value == "true":
            return True
        if raw_value == "false":
            return False
        if raw_value.find('"') == 0:
            return raw_value[1:-1]
        return raw_value

    def parse_stanza(self, block_dict):
        equal_idx = self.stream().find('=')
        if equal_idx == -1:
            raise Exception("Malformed stanza: no equal symbol found.")
        semicolon_idx = self.stream().find(';')
        parameter_name = self.stream()[:equal_idx].lower()
        parameter_value = self.stream()[equal_idx + 1:semicolon_idx]
        block_dict[parameter_name] = self.parse_parameter_value(parameter_value)
        self.pos += semicolon_idx + 1

    def parse_block_body(self, block_dict):
        while True:
            if self.stream().find('}') == 0:
                # block closing bracket reached
                return

            last_pos = self.pos
            semicolon_idx = self.stream().find(';')
            lbracket_idx = self.stream().find('{')
            is_semicolon = (semicolon_idx != -1)
            is_lbracket = (lbracket_idx != -1)
            is_semicolon_lt_lbracket = (semicolon_idx < lbracket_idx)

            # A 'key = value;' stanza comes first when its ';' precedes any '{'.
            if is_semicolon and ((is_lbracket and is_semicolon_lt_lbracket)
                                 or not is_lbracket):
                self.parse_stanza(block_dict)
            elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket)
                                  or not is_semicolon):
                if '_blocks_' not in block_dict:
                    block_dict['_blocks_'] = []
                block_dict['_blocks_'].append(self.parse_block_or_section())
            else:
                raise Exception("Malformed stanza: no semicolon found.")

            if last_pos == self.pos:
                raise Exception("Infinite loop while parsing block content")

    def parse(self):
        """Parse the whole config text into a list of block dicts."""
        self.remove_whitespaces_quotes()
        blocks = []
        while self.stream():
            blocks.append(self.parse_block_or_section())
        return blocks

    @staticmethod
    def _indentation(depth, size=4):
        conf_str = ""
        for _ in range(0, depth * size):
            conf_str += " "
        return conf_str

    @staticmethod
    def write_block_body(block, depth=0):
        def format_val(key, val):
            if isinstance(val, list):
                return ', '.join([format_val(key, v) for v in val])
            if isinstance(val, bool):
                # bool is a subclass of int, so it must be checked before int
                return str(val).lower()
            if isinstance(val, int) or (block['block_name'] == 'CLIENT'
                                        and key == 'clients'):
                return '{}'.format(val)
            return '"{}"'.format(val)

        conf_str = ""
        for key, val in block.items():
            if key == 'block_name':
                continue
            elif key == '_blocks_':
                for blo in val:
                    conf_str += GaneshaConfParser.write_block(blo, depth)
            elif val:
                conf_str += GaneshaConfParser._indentation(depth)
                conf_str += '{} = {};\n'.format(key, format_val(key, val))
        return conf_str

    @staticmethod
    def write_block(block, depth=0):
        if block['block_name'] == "%url":
            return '%url "{}"\n\n'.format(block['value'])

        conf_str = ""
        conf_str += GaneshaConfParser._indentation(depth)
        conf_str += format(block['block_name'])
        conf_str += " {\n"
        conf_str += GaneshaConfParser.write_block_body(block, depth + 1)
        conf_str += GaneshaConfParser._indentation(depth)
        conf_str += "}\n"
        return conf_str
class CephFSFSal(object):
    """
    CephFS FSAL (File System Abstraction Layer) settings of an export.

    Converts between this object form and both the Ganesha FSAL config block
    and the plain-dict representation used by the mgr interface.
    """
    # NOTE(review): the class header line was dropped by the extraction; the
    # name is recovered from the call sites (CephFSFSal.from_fsal_block /
    # CephFSFSal.from_dict) later in this file.

    def __init__(self, name, user_id=None, fs_name=None, sec_label_xattr=None,
                 cephx_key=None):
        self.name = name
        self.fs_name = fs_name
        self.user_id = user_id
        self.sec_label_xattr = sec_label_xattr
        self.cephx_key = cephx_key

    @classmethod
    def from_fsal_block(cls, fsal_block):
        """Build from a parsed Ganesha FSAL config block."""
        return cls(fsal_block['name'],
                   fsal_block.get('user_id', None),
                   fsal_block.get('filesystem', None),
                   fsal_block.get('sec_label_xattr', None),
                   fsal_block.get('secret_access_key', None))

    def to_fsal_block(self):
        """Serialize to a Ganesha FSAL config block; falsy fields are omitted."""
        result = {
            'block_name': 'FSAL',
            'name': self.name,
        }
        if self.user_id:
            result['user_id'] = self.user_id
        if self.fs_name:
            result['filesystem'] = self.fs_name
        if self.sec_label_xattr:
            result['sec_label_xattr'] = self.sec_label_xattr
        if self.cephx_key:
            result['secret_access_key'] = self.cephx_key
        return result

    @classmethod
    def from_dict(cls, fsal_dict):
        # The cephx key is never taken from dict input; it is set separately.
        return cls(fsal_dict['name'], fsal_dict['user_id'],
                   fsal_dict['fs_name'], fsal_dict['sec_label_xattr'], None)

    def to_dict(self):
        # The cephx key is deliberately excluded from the dict form.
        return {
            'name': self.name,
            'user_id': self.user_id,
            'fs_name': self.fs_name,
            'sec_label_xattr': self.sec_label_xattr
        }
class Client(object):
    """
    CLIENT block of an export: addresses plus optional access_type/squash.
    """

    def __init__(self, addresses, access_type=None, squash=None):
        self.addresses = addresses
        self.access_type = access_type
        self.squash = squash

    @classmethod
    def from_client_block(cls, client_block):
        """Build from a parsed Ganesha CLIENT block; normalizes a single address to a list."""
        addresses = client_block['clients']
        if not isinstance(addresses, list):
            addresses = [addresses]
        return cls(addresses,
                   client_block.get('access_type', None),
                   client_block.get('squash', None))

    def to_client_block(self):
        """Serialize to a Ganesha CLIENT block; falsy fields are omitted."""
        result = {
            'block_name': 'CLIENT',
            'clients': self.addresses,
        }
        if self.access_type:
            result['access_type'] = self.access_type
        if self.squash:
            result['squash'] = self.squash
        return result

    @classmethod
    def from_dict(cls, client_dict):
        return cls(client_dict['addresses'], client_dict['access_type'],
                   client_dict['squash'])

    def to_dict(self):
        return {
            'addresses': self.addresses,
            'access_type': self.access_type,
            'squash': self.squash
        }
class NFSRados(object):
    """
    Helper for reading/writing Ganesha config objects in the RADOS pool.

    All operations work inside the given pool namespace (one namespace per
    NFS cluster).
    """
    # NOTE(review): the class header line was dropped by the extraction; the
    # name is recovered from the NFSRados(...) call sites later in this file.

    def __init__(self, mgr, namespace):
        self.mgr = mgr
        self.pool = POOL_NAME
        self.namespace = namespace

    def _make_rados_url(self, obj):
        """Return the rados:// URL for an object in this pool/namespace."""
        return "rados://{}/{}/{}".format(self.pool, self.namespace, obj)

    def _create_url_block(self, obj_name):
        return {'block_name': '%url', 'value': self._make_rados_url(obj_name)}

    def write_obj(self, conf_block, obj, config_obj=''):
        """
        Write a config object and (optionally) link it into the common config.

        :param conf_block: parsed export block dict, or raw config text
        :param obj: target object name (export blocks are named 'export-<id>')
        :param config_obj: common config object to append the %url line to;
                           empty means only create the object itself
        """
        if 'export' in obj:
            # Export objects arrive as parsed block dicts; serialize them.
            conf_block = GaneshaConfParser.write_block(conf_block)

        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            ioctx.write_full(obj, conf_block.encode('utf-8'))
            if not config_obj:
                # Return after creating empty common config object
                return
            log.debug("write configuration into rados object "
                      f"{self.pool}/{self.namespace}/{obj}:\n{conf_block}")

            # Add created obj url to common config obj
            ioctx.append(config_obj, GaneshaConfParser.write_block(
                         self._create_url_block(obj)).encode('utf-8'))
            # Notify ganesha daemons watching the common config object.
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug(f"Added {obj} url to {config_obj}")

    def remove_obj(self, obj, config_obj):
        """Delete an object and remove its %url line from the common config."""
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            export_urls = ioctx.read(config_obj)
            url = '%url "{}"\n\n'.format(self._make_rados_url(obj))
            export_urls = export_urls.replace(url.encode('utf-8'), b'')
            ioctx.remove_object(obj)
            ioctx.write_full(config_obj, export_urls)
            FSExport._check_rados_notify(ioctx, config_obj)
            log.debug("Object deleted: {}".format(url))

    def remove_all_obj(self):
        """Delete every object in this namespace."""
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                obj.remove()

    def check_user_config(self):
        """Return True when a user config object exists in this namespace."""
        with self.mgr.rados.open_ioctx(self.pool) as ioctx:
            ioctx.set_namespace(self.namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("userconf-nfs"):
                    return True
        return False
class Export(object):
    """
    One NFS export: its path, pseudo path, FSAL settings and client blocks.

    Converts between this object form and both the Ganesha EXPORT config
    block and the plain-dict representation used by the mgr interface.
    """
    # pylint: disable=R0902

    def __init__(self, export_id, path, fsal, cluster_id, pseudo,
                 access_type='R', clients=None):
        self.export_id = export_id
        self.path = path
        self.fsal = fsal
        self.cluster_id = cluster_id
        self.pseudo = pseudo
        self.access_type = access_type
        # Fixed export defaults (not configurable via the mgr interface).
        self.squash = 'no_root_squash'
        self.attr_expiration_time = 0
        self.security_label = True
        self.protocols = [4]           # NFSv4 only
        self.transports = ["TCP"]
        self.clients = clients

    @classmethod
    def from_export_block(cls, export_block, cluster_id):
        """Build from a parsed Ganesha EXPORT block."""
        log.debug("parsing export block: %s", export_block)

        fsal_block = [b for b in export_block['_blocks_']
                      if b['block_name'] == "FSAL"]

        client_blocks = [b for b in export_block['_blocks_']
                         if b['block_name'] == "CLIENT"]

        return cls(export_block['export_id'],
                   export_block['path'],
                   CephFSFSal.from_fsal_block(fsal_block[0]),
                   cluster_id,
                   export_block['pseudo'],
                   export_block['access_type'],
                   [Client.from_client_block(client)
                    for client in client_blocks])

    def to_export_block(self):
        """Serialize to a Ganesha EXPORT block dict."""
        # pylint: disable=too-many-branches
        result = {
            'block_name': 'EXPORT',
            'export_id': self.export_id,
            'path': self.path,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'attr_expiration_time': self.attr_expiration_time,
            'security_label': self.security_label,
            'protocols': self.protocols,
            'transports': self.transports,
        }
        result['_blocks_'] = [self.fsal.to_fsal_block()]
        result['_blocks_'].extend([client.to_client_block()
                                   for client in self.clients])
        return result

    @classmethod
    def from_dict(cls, export_id, ex_dict):
        return cls(export_id,
                   ex_dict['path'],
                   CephFSFSal.from_dict(ex_dict['fsal']),
                   ex_dict['cluster_id'],
                   ex_dict['pseudo'],
                   ex_dict['access_type'],
                   [Client.from_dict(client) for client in ex_dict['clients']])

    def to_dict(self):
        return {
            'export_id': self.export_id,
            'path': self.path,
            'cluster_id': self.cluster_id,
            'pseudo': self.pseudo,
            'access_type': self.access_type,
            'squash': self.squash,
            'security_label': self.security_label,
            'protocols': sorted([p for p in self.protocols]),
            'transports': sorted([t for t in self.transports]),
            'fsal': self.fsal.to_dict(),
            'clients': [client.to_dict() for client in self.clients]
        }
class FSExport(object):
    """
    Manages CephFS NFS exports: creation, deletion, listing and the cephx
    users backing them.  Export state is cached per cluster id and persisted
    as rados objects via NFSRados.
    """
    # NOTE(review): reconstructed from a line-mangled dump — several control
    # flow lines (try/else/return) were inferred; confirm against upstream.

    def __init__(self, mgr, namespace=None):
        self.mgr = mgr
        self.rados_pool = POOL_NAME
        self.rados_namespace = namespace
        # Lazily-populated {cluster_id: [Export, ...]} cache (see `exports`).
        self._exports = None

    @staticmethod
    def _check_rados_notify(ioctx, obj):
        """Notify watchers of obj; a ganesha timeout is logged, not raised."""
        try:
            ioctx.notify(obj)
        except TimedOut:
            log.exception("Ganesha timed out")

    @property
    def exports(self):
        """Per-cluster export cache, parsed from rados on first access."""
        if self._exports is None:
            self._exports = {}
            log.info("Begin export parsing")
            for cluster_id in available_clusters(self.mgr):
                self.export_conf_objs = []  # type: List[Export]
                self._read_raw_config(cluster_id)
                self.exports[cluster_id] = self.export_conf_objs
                log.info(f"Exports parsed successfully {self.exports.items()}")
        return self._exports

    def _fetch_export(self, pseudo_path):
        """Return the cached export with this pseudo path, or None."""
        try:
            for ex in self.exports[self.rados_namespace]:
                if ex.pseudo == pseudo_path:
                    return ex
        except KeyError:
            pass

    def _create_user_key(self, entity, path, fs_name):
        """Create (or fetch) the cephx user for an export; return (entity, key)."""
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
                self.rados_pool, self.rados_namespace, fs_name)

        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds',
                     'allow rw path={}'.format(path)],
            'format': 'json',
            })

        json_res = json.loads(out)
        log.info("Export user created is {}".format(json_res[0]['entity']))
        return json_res[0]['entity'], json_res[0]['key']

    def _delete_user(self, entity):
        self.mgr.check_mon_command({
            'prefix': 'auth rm',
            'entity': 'client.{}'.format(entity),
            })
        log.info(f"Export user deleted is {entity}")

    def _gen_export_id(self):
        """Return the smallest positive export id not already in use."""
        exports = sorted([ex.export_id
                          for ex in self.exports[self.rados_namespace]])
        nid = 1
        for e_id in exports:
            if e_id == nid:
                nid += 1
            else:
                break
        return nid

    def _read_raw_config(self, rados_namespace):
        """Parse every 'export-*' object in the namespace into export_conf_objs."""
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            ioctx.set_namespace(rados_namespace)
            for obj in ioctx.list_objects():
                if obj.key.startswith("export-"):
                    size, _ = obj.stat()
                    raw_config = obj.read(size)
                    raw_config = raw_config.decode("utf-8")
                    log.debug("read export configuration from rados "
                              "object %s/%s/%s:\n%s", self.rados_pool,
                              rados_namespace, obj.key, raw_config)
                    self.export_conf_objs.append(Export.from_export_block(
                        GaneshaConfParser(raw_config).parse()[0],
                        rados_namespace))

    def _save_export(self, export):
        """Cache the export and persist it, linking it into the common config."""
        self.exports[self.rados_namespace].append(export)
        NFSRados(self.mgr, self.rados_namespace).write_obj(
                export.to_export_block(),
                f'export-{export.export_id}',
                f'conf-nfs.ganesha-{export.cluster_id}')

    def _delete_export(self, cluster_id, pseudo_path, export_obj=None):
        """
        Delete one export (by pseudo path, or the given object) and its user.

        :return: (rc, out, err) triple
        """
        try:
            if export_obj:
                export = export_obj
            else:
                export = self._fetch_export(pseudo_path)

            if export:
                if pseudo_path:
                    # pseudo_path is None during delete_all_exports; then the
                    # rados objects are removed wholesale by the caller.
                    NFSRados(self.mgr, self.rados_namespace).remove_obj(
                             f'export-{export.export_id}',
                             f'conf-nfs.ganesha-{cluster_id}')
                self.exports[cluster_id].remove(export)
                self._delete_user(export.fsal.user_id)
                if not self.exports[cluster_id]:
                    del self.exports[cluster_id]
                return 0, "Successfully deleted export", ""
            return 0, "", "Export does not exist"
        except Exception as e:
            log.exception(f"Failed to delete {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def format_path(self, path):
        """Normalize a path; collapse a leading double slash left by normpath."""
        if path:
            path = normpath(path.strip())
            if path[:2] == "//":
                path = path[1:]
        return path

    def check_fs(self, fs_name):
        """Return True when fs_name exists in the FSMap."""
        fs_map = self.mgr.get('fs_map')
        return fs_name in [fs['mdsmap']['fs_name']
                           for fs in fs_map['filesystems']]

    @export_cluster_checker
    def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path):
        try:
            if not self.check_fs(fs_name):
                return -errno.ENOENT, "", f"filesystem {fs_name} not found"

            pseudo_path = self.format_path(pseudo_path)
            if not isabs(pseudo_path) or pseudo_path == "/":
                return -errno.EINVAL, "", f"pseudo path {pseudo_path} is invalid. "\
                        "It should be an absolute path and it cannot be just '/'."

            if cluster_id not in self.exports:
                self.exports[cluster_id] = []

            if not self._fetch_export(pseudo_path):
                ex_id = self._gen_export_id()
                user_id = f"{cluster_id}{ex_id}"
                user_out, key = self._create_user_key(user_id, path, fs_name)
                access_type = "RW"
                if read_only:
                    access_type = "RO"
                ex_dict = {
                        'path': self.format_path(path),
                        'pseudo': pseudo_path,
                        'cluster_id': cluster_id,
                        'access_type': access_type,
                        'fsal': {"name": "CEPH", "user_id": user_id,
                                 "fs_name": fs_name, "sec_label_xattr": ""},
                        'clients': []
                        }
                export = Export.from_dict(ex_id, ex_dict)
                export.fsal.cephx_key = key
                self._save_export(export)

                result = {
                        "bind": pseudo_path,
                        "fs": fs_name,
                        "path": path,
                        "cluster": cluster_id,
                        "mode": access_type,
                        }
                return (0, json.dumps(result, indent=4), '')
            return 0, "", "Export already exists"
        except Exception as e:
            log.exception(f"Failed to create {pseudo_path} export for {cluster_id}")
            return -errno.EINVAL, "", str(e)

    @export_cluster_checker
    def delete_export(self, cluster_id, pseudo_path):
        return self._delete_export(cluster_id, pseudo_path)

    def delete_all_exports(self, cluster_id):
        """Delete every export of a cluster; raises on any per-export failure."""
        try:
            export_list = list(self.exports[cluster_id])
        except KeyError:
            log.info("No exports to delete")
            return
        self.rados_namespace = cluster_id
        for export in export_list:
            ret, out, err = self._delete_export(cluster_id=cluster_id,
                                                pseudo_path=None,
                                                export_obj=export)
            if ret != 0:
                raise Exception(f"Failed to delete exports: {err} and {ret}")
        log.info(f"All exports successfully deleted for cluster id: {cluster_id}")

    @export_cluster_checker
    def list_exports(self, cluster_id, detailed):
        try:
            if detailed:
                result = [export.to_dict()
                          for export in self.exports[cluster_id]]
            else:
                result = [export.pseudo
                          for export in self.exports[cluster_id]]
            return 0, json.dumps(result, indent=2), ''
        except KeyError:
            log.warning(f"No exports to list for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            log.exception(f"Failed to list exports for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @export_cluster_checker
    def get_export(self, cluster_id, pseudo_path):
        try:
            export = self._fetch_export(pseudo_path)
            if export:
                return 0, json.dumps(export.to_dict(), indent=2), ''
            log.warning(f"No {pseudo_path} export to show for {cluster_id}")
            return 0, '', ''
        except Exception as e:
            log.exception(f"Failed to get {pseudo_path} export for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)
class NFSCluster:
    """
    Manages NFS-Ganesha cluster lifecycle through the orchestrator: create,
    update, delete, list, show info, and set/reset the user-supplied config.
    """
    # NOTE(review): reconstructed from a line-mangled dump — the class header
    # and the @cluster_setter decorators were inferred; confirm against
    # upstream.

    def __init__(self, mgr):
        self.pool_name = POOL_NAME
        self.pool_ns = ''
        self.mgr = mgr

    def _set_cluster_id(self, cluster_id):
        # cephadm prefixes nfs service ids with 'ganesha-'.
        self.cluster_id = f"ganesha-{cluster_id}"

    def _set_pool_namespace(self, cluster_id):
        self.pool_ns = cluster_id

    def _get_common_conf_obj_name(self):
        return f'conf-nfs.{self.cluster_id}'

    def _get_user_conf_obj_name(self):
        return f'userconf-nfs.{self.cluster_id}'

    def _call_orch_apply_nfs(self, placement):
        """Apply an NFS service spec for this cluster via the orchestrator."""
        spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id,
                              pool=self.pool_name, namespace=self.pool_ns,
                              placement=PlacementSpec.from_string(placement))
        completion = self.mgr.apply_nfs(spec)
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

    def create_empty_rados_obj(self):
        """Create the (empty) common config object for this cluster."""
        common_conf = self._get_common_conf_obj_name()
        NFSRados(self.mgr, self.pool_ns).write_obj(
                '', self._get_common_conf_obj_name())
        log.info(f"Created empty object:{common_conf}")

    def delete_config_obj(self):
        NFSRados(self.mgr, self.pool_ns).remove_all_obj()
        log.info(f"Deleted {self._get_common_conf_obj_name()} object and all objects in "
                 f"{self.pool_ns}")

    def _restart_nfs_service(self):
        completion = self.mgr.service_action(action='restart',
                                             service_name='nfs.'+self.cluster_id)
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

    @cluster_setter
    def create_nfs_cluster(self, export_type, cluster_id, placement):
        if export_type != 'cephfs':
            return -errno.EINVAL, "", f"Invalid export type: {export_type}"
        try:
            pool_list = [p['pool_name']
                         for p in self.mgr.get_osdmap().dump().get('pools', [])]

            if self.pool_name not in pool_list:
                r, out, err = create_pool(self.mgr, self.pool_name)
                if r != 0:
                    return r, out, err
                log.info(f"Pool Status: {out}")

                self.mgr.check_mon_command({'prefix': 'osd pool application enable',
                                            'pool': self.pool_name, 'app': 'nfs'})

            self.create_empty_rados_obj()

            if cluster_id not in available_clusters(self.mgr):
                self._call_orch_apply_nfs(placement)
                return 0, "NFS Cluster Created Successfully", ""
            return 0, "", f"{cluster_id} cluster already exists"
        except Exception as e:
            log.exception(f"NFS Cluster {cluster_id} could not be created")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def update_nfs_cluster(self, cluster_id, placement):
        try:
            if cluster_id in available_clusters(self.mgr):
                self._call_orch_apply_nfs(placement)
                return 0, "NFS Cluster Updated Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"NFS Cluster {cluster_id} could not be updated")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def delete_nfs_cluster(self, cluster_id):
        try:
            cluster_list = available_clusters(self.mgr)
            if cluster_id in cluster_list:
                self.mgr.fs_export.delete_all_exports(cluster_id)
                completion = self.mgr.remove_service('nfs.' + self.cluster_id)
                self.mgr._orchestrator_wait([completion])
                orchestrator.raise_if_exception(completion)
                self.delete_config_obj()
                return 0, "NFS Cluster Deleted Successfully", ""
            return 0, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Failed to delete NFS Cluster {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    def list_nfs_cluster(self):
        try:
            return 0, '\n'.join(available_clusters(self.mgr)), ""
        except Exception as e:
            log.exception("Failed to list NFS Cluster")
            return getattr(e, 'errno', -1), "", str(e)

    def _show_nfs_cluster_info(self, cluster_id):
        """Return hostname/ip/port info for each daemon of a cluster."""
        self._set_cluster_id(cluster_id)
        completion = self.mgr.list_daemons(daemon_type='nfs')
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        host_ip = []
        # Here completion.result is a list DaemonDescription objects
        for cluster in completion.result:
            if self.cluster_id == cluster.service_id():
                # getaddrinfo sample output: [(<AddressFamily.AF_INET: 2>,
                # <SocketKind.SOCK_STREAM: 1>, 6, 'xyz', ('172.217.166.98',2049)),
                # (<AddressFamily.AF_INET6: 10>, <SocketKind.SOCK_STREAM: 1>, 6, '',
                # ('2404:6800:4009:80d::200e', 2049, 0, 0))]
                try:
                    host_ip.append({
                            "hostname": cluster.hostname,
                            "ip": list(set([ip[4][0] for ip in socket.getaddrinfo(
                                cluster.hostname, 2049, flags=socket.AI_CANONNAME,
                                type=socket.SOCK_STREAM)])),
                            "port": 2049  # Default ganesha port
                            })
                except socket.gaierror:
                    # Unresolvable host: skip it rather than fail the listing.
                    continue
        return host_ip

    def show_nfs_cluster_info(self, cluster_id=None):
        try:
            info_res = {}
            if cluster_id:
                cluster_ls = [cluster_id]
            else:
                cluster_ls = available_clusters(self.mgr)

            for cluster_id in cluster_ls:
                res = self._show_nfs_cluster_info(cluster_id)
                if res:
                    info_res[cluster_id] = res
            return (0, json.dumps(info_res, indent=4), '')
        except Exception as e:
            log.exception("Failed to show info for cluster")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def set_nfs_cluster_config(self, cluster_id, nfs_config):
        try:
            if not nfs_config:
                return -errno.EINVAL, "", "Empty Config!!"
            if cluster_id in available_clusters(self.mgr):
                rados_obj = NFSRados(self.mgr, self.pool_ns)
                if rados_obj.check_user_config():
                    return 0, "", "NFS-Ganesha User Config already exists"
                rados_obj.write_obj(nfs_config, self._get_user_conf_obj_name(),
                                    self._get_common_conf_obj_name())
                self._restart_nfs_service()
                return 0, "NFS-Ganesha Config Set Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Setting NFS-Ganesha Config failed for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)

    @cluster_setter
    def reset_nfs_cluster_config(self, cluster_id):
        try:
            if cluster_id in available_clusters(self.mgr):
                rados_obj = NFSRados(self.mgr, self.pool_ns)
                if not rados_obj.check_user_config():
                    return 0, "", "NFS-Ganesha User Config does not exist"
                rados_obj.remove_obj(self._get_user_conf_obj_name(),
                                     self._get_common_conf_obj_name())
                self._restart_nfs_service()
                return 0, "NFS-Ganesha Config Reset Successfully", ""
            return -errno.ENOENT, "", "Cluster does not exist"
        except Exception as e:
            log.exception(f"Resetting NFS-Ganesha Config failed for {cluster_id}")
            return getattr(e, 'errno', -1), "", str(e)