self.client = SnapSchedClient(self)
@property
- def default_fs(self) -> str:
+ def _default_fs(self) -> Tuple[int, str, str]:
fs_map = self.get('fs_map')
- if fs_map['filesystems']:
- return fs_map['filesystems'][0]['mdsmap']['fs_name']
+ if len(fs_map['filesystems']) > 1:
+            return -errno.EINVAL, '', "filesystem argument is required when there is more than one filesystem"
+ elif len(fs_map['filesystems']) == 1:
+ return 0, fs_map['filesystems'][0]['mdsmap']['fs_name'], "Success"
else:
self.log.error('No filesystem instance could be found.')
- raise CephfsConnectionException(
- -errno.ENOENT, "no filesystem found")
+ return -errno.ENOENT, "", "no filesystem found"
+
+ def _validate_fs(self, fs: Optional[str]) -> Tuple[int, str, str]:
+ if not fs:
+ rc, fs, err = self._default_fs
+ if rc < 0:
+ return rc, fs, err
+ if not self.has_fs(fs):
+            return -errno.EINVAL, '', f"no such filesystem: {fs}"
+ return 0, fs, 'Success'
def has_fs(self, fs_name: str) -> bool:
return fs_name in self.client.get_all_filesystems()
'''
List current snapshot schedules
'''
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- ret_scheds = self.client.get_snap_schedules(use_fs, path)
+ ret_scheds = self.client.get_snap_schedules(fs, path)
except CephfsConnectionException as e:
return e.to_tuple()
if format == 'json':
'''
Get current snapshot schedule for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
- scheds = self.client.list_snap_schedules(use_fs, path, recursive)
+ scheds = self.client.list_snap_schedules(fs, path, recursive)
self.log.debug(f'recursive is {recursive}')
except CephfsConnectionException as e:
return e.to_tuple()
'''
Set a snapshot schedule for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
subvol = None
- self.client.store_snap_schedule(use_fs,
+ self.client.store_snap_schedule(fs,
abs_path,
(abs_path, snap_schedule,
- use_fs, path, start, subvol))
+ fs, path, start, subvol))
suc_msg = f'Schedule set for path {path}'
except sqlite3.IntegrityError:
- existing_scheds = self.client.get_snap_schedules(use_fs, path)
+ existing_scheds = self.client.get_snap_schedules(fs, path)
report = [s.report() for s in existing_scheds]
error_msg = f'Found existing schedule {report}'
self.log.error(error_msg)
'''
Remove a snapshot schedule for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
- self.client.rm_snap_schedule(use_fs, abs_path, repeat, start)
- except CephfsConnectionException as e:
- return e.to_tuple()
+ self.client.rm_snap_schedule(fs, abs_path, repeat, start)
except ValueError as e:
return -errno.ENOENT, '', str(e)
+ except CephfsConnectionException as e:
+ return e.to_tuple()
return 0, 'Schedule removed for path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule retention add')
'''
Set a retention specification for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
- self.client.add_retention_spec(use_fs, abs_path,
- retention_spec_or_period,
- retention_count)
- except CephfsConnectionException as e:
- return e.to_tuple()
+ self.client.add_retention_spec(fs, abs_path,
+ retention_spec_or_period,
+ retention_count)
except ValueError as e:
return -errno.ENOENT, '', str(e)
+ except CephfsConnectionException as e:
+ return e.to_tuple()
return 0, 'Retention added to path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule retention remove')
'''
Remove a retention specification for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
- self.client.rm_retention_spec(use_fs, abs_path,
+ self.client.rm_retention_spec(fs, abs_path,
retention_spec_or_period,
retention_count)
except CephfsConnectionException as e:
'''
Activate a snapshot schedule for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
- self.client.activate_snap_schedule(use_fs, abs_path, repeat, start)
- except CephfsConnectionException as e:
- return e.to_tuple()
+ self.client.activate_snap_schedule(fs, abs_path, repeat, start)
except ValueError as e:
return -errno.ENOENT, '', str(e)
+ except CephfsConnectionException as e:
+ return e.to_tuple()
return 0, 'Schedule activated for path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule deactivate')
'''
Deactivate a snapshot schedule for <path>
'''
+ rc, fs, err = self._validate_fs(fs)
+ if rc < 0:
+ return rc, fs, err
try:
- use_fs = fs if fs else self.default_fs
- if not self.has_fs(use_fs):
- return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
- self.client.deactivate_snap_schedule(use_fs, abs_path, repeat, start)
- except CephfsConnectionException as e:
- return e.to_tuple()
+ self.client.deactivate_snap_schedule(fs, abs_path, repeat, start)
except ValueError as e:
return -errno.ENOENT, '', str(e)
+ except CephfsConnectionException as e:
+ return e.to_tuple()
return 0, 'Schedule deactivated for path {}'.format(path), ''