import errno
import logging
import os

from typing import List, Tuple

from contextlib import contextmanager

import orchestrator

from .lock import GlobalLock
from ..exception import VolumeException
from ..fs_util import create_pool, remove_pool, rename_pool, create_filesystem, \
    remove_filesystem, rename_filesystem, create_mds, volume_exists, listdir
from .trash import Trash
from mgr_util import open_filesystem, CephfsConnectionException

log = logging.getLogger(__name__)

def gen_pool_names(volname):
    """
    return the metadata and data pool names (derived from a filesystem/volume name) as a tuple
    """
    return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)

def get_mds_map(mgr, volname):
    """
    return the mdsmap for a volname, or None if no filesystem by that name exists
    """
    mds_map = None
    fs_map = mgr.get("fs_map")
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            return f['mdsmap']
    return mds_map
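
# Sketch of the fs_map shape this module relies on (only the keys used here
# are shown; the live map carries many more fields):
#   {'filesystems': [{'mdsmap': {'fs_name': 'vol1',
#                                'metadata_pool': <pool id>,
#                                'data_pools': [<pool id>, ...]}}, ...]}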

def get_pool_names(mgr, volname):
    """
    return the volume's metadata pool name and list of data pool names as a tuple
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        return None, None

    osdmap = mgr.get("osd_map")
    pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
    metadata_pool = pools[metadata_pool_id]
    data_pools = [pools[id] for id in data_pool_ids]
    return metadata_pool, data_pools
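
# Example: for a volume created by create_volume() below and never renamed,
#   get_pool_names(mgr, "vol1") == ("cephfs.vol1.meta", ["cephfs.vol1.data"])
# A (None, None) result means no filesystem named volname exists.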

def get_pool_ids(mgr, volname):
    """
    return the volume's metadata pool id and list of data pool ids as a tuple
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        return None, None
    return metadata_pool_id, data_pool_ids
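
# As with get_pool_names(), (None, None) is the missing-volume sentinel, so
# callers are expected to check for it before using the data pool id list.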

def create_volume(mgr, volname, placement):
    """
    create volume (pools, filesystem and mds)
    """
    metadata_pool, data_pool = gen_pool_names(volname)
    # create pools
    r, outb, outs = create_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs
    # default to a bulk pool for data. In case autoscaling has been disabled
    # for the cluster with `ceph osd pool set noautoscale`, this will have no effect.
    r, outb, outs = create_pool(mgr, data_pool, bulk=True)
    if r != 0:
        # cleanup
        remove_pool(mgr, metadata_pool)
        return r, outb, outs
    # create filesystem
    r, outb, outs = create_filesystem(mgr, volname, metadata_pool, data_pool)
    if r != 0:
        log.error("Filesystem creation error: {0} {1} {2}".format(r, outb, outs))
        # cleanup
        remove_pool(mgr, data_pool)
        remove_pool(mgr, metadata_pool)
        return r, outb, outs
    return create_mds(mgr, volname, placement)
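
# Illustrative call; the placement argument is an orchestrator placement
# specification (the "2" below, meaning two MDS daemons, is only an assumption
# for this sketch):
#   r, outb, outs = create_volume(mgr, "vol1", "2")
#   if r != 0:
#       log.error("volume creation failed: %s", outs)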


def delete_volume(mgr, volname, metadata_pool, data_pools):
    """
    delete the given volume (tear down mds, remove filesystem, remove pools)
    """
    # Tear down MDS daemons
    try:
        completion = mgr.remove_service('mds.' + volname)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("OrchestratorError, not tearing down MDS daemons")
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to tear down MDS daemons")
        return -errno.EINVAL, "", str(e)

    # In case the orchestrator didn't tear down the MDS daemons cleanly, or
    # there was no orchestrator, we force the daemons down.
    if volume_exists(mgr, volname):
        r, outb, outs = remove_filesystem(mgr, volname)
        if r != 0:
            return r, outb, outs
    else:
        err = "Filesystem not found for volume '{0}'".format(volname)
        log.warning(err)
        return -errno.ENOENT, "", err
    r, outb, outs = remove_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs

    for data_pool in data_pools:
        r, outb, outs = remove_pool(mgr, data_pool)
        if r != 0:
            return r, outb, outs
    result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
    return r, result_str, ""
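
# On full success the returned status string, per the format above, reads e.g.:
#   "metadata pool: cephfs.vol1.meta data pool: ['cephfs.vol1.data'] removed"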

def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
    """
    rename volume (orch MDS service, file system, pools)
    """
    # To allow volume rename to be idempotent, check whether the orch managed
    # MDS service is already renamed. If so, skip renaming the MDS service.
    completion = None
    rename_mds_service = True
    try:
        completion = mgr.describe_service(
            service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("Failed to fetch orch service mds.%s", newvolname)
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to fetch orch service mds.%s", newvolname)
        return -errno.EINVAL, "", str(e)
    if completion and completion.result:
        rename_mds_service = False

    # Launch a new MDS service matching newvolname
    completion = None
    remove_mds_service = False
    if rename_mds_service:
        try:
            completion = mgr.describe_service(
                service_type='mds', service_name=f"mds.{volname}", refresh=True)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to fetch orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to fetch orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)
        if completion and completion.result:
            svc = completion.result[0]
            placement = svc.spec.placement.pretty_str()
            create_mds(mgr, newvolname, placement)
            remove_mds_service = True

    # rename_filesystem is idempotent
    r, outb, outs = rename_filesystem(mgr, volname, newvolname)
    if r != 0:
        errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
        log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
        outs = f'{errmsg}; {outs}'
        return r, outb, outs

    # Rename the file system's metadata and data pools
    metadata_pool, data_pools = get_pool_names(mgr, newvolname)

    new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
    if metadata_pool != new_metadata_pool:
        r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
        if r != 0:
            errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
            log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
            outs = f'{errmsg}; {outs}'
            return r, outb, outs

    data_pool_rename_failed = False
    # If the file system has more than one data pool, skip renaming the data
    # pools and proceed to remove the old MDS service.
    if len(data_pools) > 1:
        data_pool_rename_failed = True
    else:
        data_pool = data_pools[0]
        if data_pool != new_data_pool:
            r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
            if r != 0:
                errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
                log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
                outs = f'{errmsg}; {outs}'
                return r, outb, outs

    # Tear down the old MDS service
    if remove_mds_service:
        try:
            completion = mgr.remove_service('mds.' + volname)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to tear down orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to tear down orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)

    outb = f"FS volume '{volname}' renamed to '{newvolname}'"
    if data_pool_rename_failed:
        outb += ". But failed to rename data pools as more than one data pool was found."

    return r, outb, ""
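
# On success outb reads, e.g.:
#   "FS volume 'vol1' renamed to 'vol2'"
# with the multiple-data-pool caveat appended when pool renaming was skipped.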

def list_volumes(mgr):
    """
    list all filesystem volumes.

    :param mgr: mgr module handle
    :return: list of volumes, each as a dict with a 'name' key
    """
    result = []
    fs_map = mgr.get("fs_map")
    for f in fs_map['filesystems']:
        result.append({'name': f['mdsmap']['fs_name']})
    return result
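
# Example return value:
#   [{'name': 'vol1'}, {'name': 'vol2'}]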


def get_pending_subvol_deletions_count(fs, path):
    """
    Get the number of pending subvolume deletions.
    """
    trashdir = os.path.join(path, Trash.GROUP_NAME)
    try:
        num_pending_subvol_del = len(listdir(fs, trashdir, filter_entries=None, filter_files=False))
    except VolumeException as ve:
        if ve.errno == -errno.ENOENT:
            # a missing trash directory means nothing is pending deletion
            num_pending_subvol_del = 0
        else:
            # re-raise so num_pending_subvol_del is never left unbound
            raise

    return {'pending_subvolume_deletions': num_pending_subvol_del}
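
# Example: with two subvolumes awaiting purge in the trash directory,
#   get_pending_subvol_deletions_count(fs, path) == {'pending_subvolume_deletions': 2}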


@contextmanager
def open_volume(vc, volname):
    """
    open a volume for exclusive access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    g_lock = GlobalLock()
    with g_lock.lock_op():
        try:
            with open_filesystem(vc, volname) as fs_handle:
                yield fs_handle
        except CephfsConnectionException as ce:
            raise VolumeException(ce.errno, ce.error_str)
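
# Typical usage, as a sketch ("vc" being the volume client the mgr module holds):
#   with open_volume(vc, "vol1") as fs_handle:
#       ...  # issue libcephfs calls against fs_handle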


@contextmanager
def open_volume_lockless(vc, volname):
    """
    open a volume with shared access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    try:
        with open_filesystem(vc, volname) as fs_handle:
            yield fs_handle
    except CephfsConnectionException as ce:
        raise VolumeException(ce.errno, ce.error_str)
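
# Unlike open_volume(), the lockless variant takes no GlobalLock, so it suits
# operations that can safely run concurrently with other volume accesses.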