]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/snap_schedule/module.py
a1a34e08b07aceb0d1a958d68149120483547859
[ceph.git] / ceph / src / pybind / mgr / snap_schedule / module.py
1 """
2 Copyright (C) 2019 SUSE
3
4 LGPL2.1. See file COPYING.
5 """
6 import errno
7 import json
8 import sqlite3
9 from typing import Sequence, Optional
10 from .fs.schedule_client import SnapSchedClient
11 from mgr_module import MgrModule, CLIReadCommand, CLIWriteCommand, Option
12 from mgr_util import CephfsConnectionException
13 from threading import Event
14
15
class Module(MgrModule):
    """Mgr module exposing CLI commands to manage CephFS snapshot schedules.

    This class is a thin command-dispatch layer: it handles CLI argument
    defaulting (filesystem, subvolume path resolution) and maps exceptions
    to (retval, stdout, stderr) tuples, while the actual schedule storage
    and timer bookkeeping are delegated to SnapSchedClient.
    """

    MODULE_OPTIONS = [
        Option(
            'allow_m_granularity',
            type='bool',
            default=False,
            desc='allow minute scheduled snapshots',
            runtime=True,
        ),
    ]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)
        # Set by serve(); handle_command() blocks on it so commands are
        # not processed before the module is up.
        self._initialized = Event()
        self.client = SnapSchedClient(self)

    def resolve_subvolume_path(self, fs, subvol, path):
        """Return the absolute path of *path*, prefixed by the subvolume root.

        If *subvol* is falsy, *path* is returned unchanged.  Otherwise the
        subvolume's root directory is looked up via the volumes module and
        prepended to *path*.

        Raises:
            Exception: if the 'fs subvolume getpath' call fails.
        """
        if not subvol:
            return path

        rc, subvol_path, err = self.remote('fs', 'subvolume', 'getpath',
                                           fs, subvol)
        if rc != 0:
            # TODO custom exception?
            raise Exception(f'Could not resolve {path} in {fs}, {subvol}')
        return subvol_path + path

    @property
    def default_fs(self):
        """Name of the first filesystem listed in the FSMap.

        Raises:
            CephfsConnectionException: if no filesystem exists.
        """
        fs_map = self.get('fs_map')
        if fs_map['filesystems']:
            return fs_map['filesystems'][0]['mdsmap']['fs_name']
        else:
            self.log.error('No filesystem instance could be found.')
            raise CephfsConnectionException(
                -errno.ENOENT, "no filesystem found")

    def serve(self):
        # No background work to do; just signal readiness to handle_command.
        self._initialized.set()

    def handle_command(self, inbuf, cmd):
        # All real commands are dispatched through the CLI decorators below;
        # anything landing here is unknown.
        self._initialized.wait()
        return -errno.EINVAL, "", "Unknown command"

    @CLIReadCommand('fs snap-schedule status')
    def snap_schedule_get(self,
                          path: Optional[str] = '/',
                          subvol: Optional[str] = None,
                          fs: Optional[str] = None,
                          format: Optional[str] = 'plain'):
        '''
        List current snapshot schedules
        '''
        # NOTE(review): `subvol` is accepted but currently ignored here —
        # status is reported for the literal `path` only; confirm intent.
        use_fs = fs if fs else self.default_fs
        try:
            ret_scheds = self.client.get_snap_schedules(use_fs, path)
        except CephfsConnectionException as e:
            return e.to_tuple()
        if format == 'json':
            json_report = ','.join([ret_sched.report_json() for ret_sched in ret_scheds])
            return 0, f'{json_report}', ''
        return 0, '\n===\n'.join([ret_sched.report() for ret_sched in ret_scheds]), ''

    @CLIReadCommand('fs snap-schedule list')
    def snap_schedule_list(self, path: str,
                           subvol: Optional[str] = None,
                           recursive: Optional[bool] = False,
                           fs: Optional[str] = None,
                           format: Optional[str] = 'plain'):
        '''
        Get current snapshot schedule for <path>
        '''
        # NOTE(review): `subvol` is accepted but currently ignored here;
        # confirm intent.
        try:
            use_fs = fs if fs else self.default_fs
            scheds = self.client.list_snap_schedules(use_fs, path, recursive)
            self.log.debug(f'recursive is {recursive}')
        except CephfsConnectionException as e:
            return e.to_tuple()
        if not scheds:
            return -errno.ENOENT, '', f'SnapSchedule for {path} not found'
        if format == 'json':
            schedule_list = [sched.schedule for sched in scheds]
            retention_list = [sched.retention for sched in scheds]
            out = {'path': path, 'schedule': schedule_list, 'retention': retention_list}
            return 0, json.dumps(out), ''
        return 0, '\n'.join([str(sched) for sched in scheds]), ''

    @CLIWriteCommand('fs snap-schedule add')
    def snap_schedule_add(self,
                          path: str,
                          snap_schedule: Optional[str],
                          start: Optional[str] = None,
                          fs: Optional[str] = None,
                          subvol: Optional[str] = None):
        '''
        Set a snapshot schedule for <path>
        '''
        try:
            use_fs = fs if fs else self.default_fs
            # Resolve against use_fs, not the raw `fs` argument: `fs` may be
            # None (default filesystem requested), which would make the
            # subvolume getpath call fail.
            abs_path = self.resolve_subvolume_path(use_fs, subvol, path)
            self.client.store_snap_schedule(use_fs,
                                            abs_path,
                                            (abs_path, snap_schedule,
                                             use_fs, path, start, subvol))
            suc_msg = f'Schedule set for path {path}'
        except sqlite3.IntegrityError:
            # A schedule with the same (path, schedule, start) already exists.
            existing_scheds = self.client.get_snap_schedules(use_fs, path)
            report = [s.report() for s in existing_scheds]
            error_msg = f'Found existing schedule {report}'
            self.log.error(error_msg)
            return -errno.EEXIST, '', error_msg
        except ValueError as e:
            return -errno.ENOENT, '', str(e)
        except CephfsConnectionException as e:
            return e.to_tuple()
        return 0, suc_msg, ''

    @CLIWriteCommand('fs snap-schedule remove')
    def snap_schedule_rm(self,
                         path: str,
                         repeat: Optional[str] = None,
                         start: Optional[str] = None,
                         subvol: Optional[str] = None,
                         fs: Optional[str] = None):
        '''
        Remove a snapshot schedule for <path>
        '''
        try:
            use_fs = fs if fs else self.default_fs
            # use_fs (not raw `fs`) — see snap_schedule_add.
            abs_path = self.resolve_subvolume_path(use_fs, subvol, path)
            self.client.rm_snap_schedule(use_fs, abs_path, repeat, start)
        except CephfsConnectionException as e:
            return e.to_tuple()
        except ValueError as e:
            return -errno.ENOENT, '', str(e)
        return 0, 'Schedule removed for path {}'.format(path), ''

    @CLIWriteCommand('fs snap-schedule retention add')
    def snap_schedule_retention_add(self,
                                    path: str,
                                    retention_spec_or_period: str,
                                    retention_count: Optional[str] = None,
                                    fs: Optional[str] = None,
                                    subvol: Optional[str] = None):
        '''
        Set a retention specification for <path>
        '''
        try:
            use_fs = fs if fs else self.default_fs
            # use_fs (not raw `fs`) — see snap_schedule_add.
            abs_path = self.resolve_subvolume_path(use_fs, subvol, path)
            self.client.add_retention_spec(use_fs, abs_path,
                                           retention_spec_or_period,
                                           retention_count)
        except CephfsConnectionException as e:
            return e.to_tuple()
        except ValueError as e:
            return -errno.ENOENT, '', str(e)
        return 0, 'Retention added to path {}'.format(path), ''

    @CLIWriteCommand('fs snap-schedule retention remove')
    def snap_schedule_retention_rm(self,
                                   path: str,
                                   retention_spec_or_period: str,
                                   retention_count: Optional[str] = None,
                                   fs: Optional[str] = None,
                                   subvol: Optional[str] = None):
        '''
        Remove a retention specification for <path>
        '''
        try:
            use_fs = fs if fs else self.default_fs
            # use_fs (not raw `fs`) — see snap_schedule_add.
            abs_path = self.resolve_subvolume_path(use_fs, subvol, path)
            self.client.rm_retention_spec(use_fs, abs_path,
                                          retention_spec_or_period,
                                          retention_count)
        except CephfsConnectionException as e:
            return e.to_tuple()
        except ValueError as e:
            return -errno.ENOENT, '', str(e)
        return 0, 'Retention removed from path {}'.format(path), ''

    @CLIWriteCommand('fs snap-schedule activate')
    def snap_schedule_activate(self,
                               path: str,
                               repeat: Optional[str] = None,
                               start: Optional[str] = None,
                               subvol: Optional[str] = None,
                               fs: Optional[str] = None):
        '''
        Activate a snapshot schedule for <path>
        '''
        try:
            use_fs = fs if fs else self.default_fs
            # use_fs (not raw `fs`) — see snap_schedule_add.
            abs_path = self.resolve_subvolume_path(use_fs, subvol, path)
            self.client.activate_snap_schedule(use_fs, abs_path, repeat, start)
        except CephfsConnectionException as e:
            return e.to_tuple()
        except ValueError as e:
            return -errno.ENOENT, '', str(e)
        return 0, 'Schedule activated for path {}'.format(path), ''

    @CLIWriteCommand('fs snap-schedule deactivate')
    def snap_schedule_deactivate(self,
                                 path: str,
                                 repeat: Optional[str] = None,
                                 start: Optional[str] = None,
                                 subvol: Optional[str] = None,
                                 fs: Optional[str] = None):
        '''
        Deactivate a snapshot schedule for <path>
        '''
        try:
            use_fs = fs if fs else self.default_fs
            # use_fs (not raw `fs`) — see snap_schedule_add.
            abs_path = self.resolve_subvolume_path(use_fs, subvol, path)
            self.client.deactivate_snap_schedule(use_fs, abs_path, repeat, start)
        except CephfsConnectionException as e:
            return e.to_tuple()
        except ValueError as e:
            return -errno.ENOENT, '', str(e)
        return 0, 'Schedule deactivated for path {}'.format(path), ''