]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/volumes/fs/operations/volume.py
import ceph quincy 17.2.4
[ceph.git] / ceph / src / pybind / mgr / volumes / fs / operations / volume.py
1 import errno
2 import logging
3 import os
4
5 from typing import List, Tuple
6
7 from contextlib import contextmanager
8
9 import orchestrator
10
11 from .lock import GlobalLock
12 from ..exception import VolumeException
13 from ..fs_util import create_pool, remove_pool, rename_pool, create_filesystem, \
14 remove_filesystem, rename_filesystem, create_mds, volume_exists
15 from .trash import Trash
16 from mgr_util import open_filesystem, CephfsConnectionException
17
18 log = logging.getLogger(__name__)
19
def gen_pool_names(volname):
    """
    Derive the canonical pool names for a volume.

    :param volname: file system (volume) name
    :return: tuple of (metadata pool name, data pool name)
    """
    metadata_pool = "cephfs.{}.meta".format(volname)
    data_pool = "cephfs.{}.data".format(volname)
    return metadata_pool, data_pool
25
def get_mds_map(mgr, volname):
    """
    Return the mdsmap of the named volume, or None when no file system
    with that name exists.

    :param mgr: mgr module instance (provides ``get("fs_map")``)
    :param volname: file system (volume) name
    """
    fs_map = mgr.get("fs_map")
    for fs in fs_map['filesystems']:
        mdsmap = fs['mdsmap']
        if mdsmap['fs_name'] == volname:
            return mdsmap
    return None
36
def get_pool_names(mgr, volname):
    """
    Return metadata and data pool names of a volume as a tuple.

    :param mgr: mgr module instance (provides ``get("fs_map")`` and
                ``get("osd_map")``)
    :param volname: file system (volume) name
    :return: (metadata pool name, list of data pool names), or
             (None, None) if no file system with that name exists
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        return None, None

    # Translate pool ids to pool names via the OSD map.
    osdmap = mgr.get("osd_map")
    pools = {p['pool']: p['pool_name'] for p in osdmap['pools']}
    metadata_pool = pools[metadata_pool_id]
    data_pools = [pools[pid] for pid in data_pool_ids]
    return metadata_pool, data_pools
57
def get_pool_ids(mgr, volname):
    """
    Return metadata and data pool ids of a volume as a tuple.

    :param mgr: mgr module instance (provides ``get("fs_map")``)
    :param volname: file system (volume) name
    :return: (metadata pool id, list of data pool ids), or (None, None)
             if no file system with that name exists
    """
    fs_map = mgr.get("fs_map")
    for fs in fs_map['filesystems']:
        mdsmap = fs['mdsmap']
        if mdsmap['fs_name'] == volname:
            return mdsmap['metadata_pool'], mdsmap['data_pools']
    return None, None
73
def create_volume(mgr, volname, placement):
    """
    Create a volume: its pools, file system and MDS service.

    :param mgr: mgr module instance
    :param volname: file system (volume) name
    :param placement: orchestrator placement spec for the MDS service
    :return: (return-code, stdout, stderr) style tuple
    """
    metadata_pool, data_pool = gen_pool_names(volname)

    # Create the metadata pool first; bail out on failure.
    ret = create_pool(mgr, metadata_pool)
    if ret[0] != 0:
        return ret

    # Create the data pool; roll back the metadata pool on failure.
    ret = create_pool(mgr, data_pool)
    if ret[0] != 0:
        remove_pool(mgr, metadata_pool)
        return ret

    # Create the file system on top of both pools; roll back both pools
    # on failure.
    ret = create_filesystem(mgr, volname, metadata_pool, data_pool)
    if ret[0] != 0:
        log.error("Filesystem creation error: {0} {1} {2}".format(*ret))
        remove_pool(mgr, data_pool)
        remove_pool(mgr, metadata_pool)
        return ret

    # Finally spin up MDS daemons for the new file system.
    return create_mds(mgr, volname, placement)
97
98
def delete_volume(mgr, volname, metadata_pool, data_pools):
    """
    Delete the given volume (tear down MDS, remove file system, remove pools).

    :param mgr: mgr module instance
    :param volname: file system (volume) name
    :param metadata_pool: name of the volume's metadata pool
    :param data_pools: list of the volume's data pool names
    :return: (return-code, stdout, stderr) style tuple
    """
    # Tear down MDS daemons via the orchestrator, best effort.
    try:
        completion = mgr.remove_service('mds.' + volname)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("OrchestratorError, not tearing down MDS daemons")
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to tear down MDS daemons")
        return -errno.EINVAL, "", str(e)

    # In case orchestrator didn't tear down MDS daemons cleanly, or
    # there was no orchestrator, we force the daemons down.
    if not volume_exists(mgr, volname):
        err = "Filesystem not found for volume '{0}'".format(volname)
        log.warning(err)
        return -errno.ENOENT, "", err

    r, outb, outs = remove_filesystem(mgr, volname)
    if r != 0:
        return r, outb, outs

    # Remove the metadata pool, then each data pool; stop on first failure.
    for pool in [metadata_pool] + list(data_pools):
        r, outb, outs = remove_pool(mgr, pool)
        if r != 0:
            return r, outb, outs

    result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
    return r, result_str, ""
135
def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
    """
    rename volume (orch MDS service, file system, pools)

    :param mgr: mgr module instance (provides describe_service/remove_service)
    :param volname: current volume name
    :param newvolname: new volume name
    :return: (return-code, stdout, stderr) style tuple

    The steps are ordered so a partially-completed rename can be retried:
    first a new MDS service is launched under the new name, then the file
    system and pools are renamed, and only then is the old MDS service
    torn down.
    """
    # To allow volume rename to be idempotent, check whether orch managed MDS
    # service is already renamed. If so, skip renaming MDS service.
    completion = None
    rename_mds_service = True
    try:
        completion = mgr.describe_service(
            service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        # No orchestrator available (or it errored); treated as "service
        # not renamed yet" and handled by the forced steps below.
        log.warning("Failed to fetch orch service mds.%s", newvolname)
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to fetch orch service mds.%s", newvolname)
        return -errno.EINVAL, "", str(e)
    if completion and completion.result:
        # A service for newvolname already exists: a previous run already
        # renamed the MDS service.
        rename_mds_service = False

    # Launch new MDS service matching newvolname
    completion = None
    remove_mds_service = False
    if rename_mds_service:
        try:
            completion = mgr.describe_service(
                service_type='mds', service_name=f"mds.{volname}", refresh=True)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to fetch orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to fetch orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)
        if completion and completion.result:
            svc = completion.result[0]
            # Reuse the old service's placement spec for the new service;
            # the old service is removed at the end, once everything else
            # has succeeded.
            placement = svc.spec.placement.pretty_str()
            create_mds(mgr, newvolname, placement)
            remove_mds_service = True

    # rename_filesystem is idempotent
    r, outb, outs = rename_filesystem(mgr, volname, newvolname)
    if r != 0:
        errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
        log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
        outs = f'{errmsg}; {outs}'
        return r, outb, outs

    # Rename file system's metadata and data pools
    # NOTE: looked up under newvolname since the fs rename above succeeded.
    metadata_pool, data_pools = get_pool_names(mgr, newvolname)

    new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
    if metadata_pool != new_metadata_pool:
        r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
        if r != 0:
            errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
            log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
            outs = f'{errmsg}; {outs}'
            return r, outb, outs

    data_pool_rename_failed = False
    # If file system has more than one data pool, then skip renaming
    # the data pools, and proceed to remove the old MDS service.
    if len(data_pools) > 1:
        data_pool_rename_failed = True
    else:
        data_pool = data_pools[0]
        if data_pool != new_data_pool:
            r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
            if r != 0:
                errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
                log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
                outs = f'{errmsg}; {outs}'
                return r, outb, outs

    # Tear down old MDS service
    if remove_mds_service:
        try:
            completion = mgr.remove_service('mds.' + volname)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to tear down orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to tear down orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)

    outb = f"FS volume '{volname}' renamed to '{newvolname}'"
    if data_pool_rename_failed:
        outb += ". But failed to rename data pools as more than one data pool was found."

    return r, outb, ""
232
def list_volumes(mgr):
    """
    List all file system volumes.

    :param mgr: mgr module instance (provides ``get("fs_map")``)
    :return: list of dicts, one per volume: ``{'name': <fs_name>}``
    """
    fs_map = mgr.get("fs_map")
    return [{'name': f['mdsmap']['fs_name']} for f in fs_map['filesystems']]
245
246
def get_pending_subvol_deletions_count(path):
    """
    Get the number of pending subvolumes deletions.

    :param path: volume root path under which the trash group lives
    :return: dict of the form ``{'pending_subvolume_deletions': <count>}``
    """
    trashdir = os.path.join(path, Trash.GROUP_NAME)
    try:
        num_pending_subvol_del = len(os.listdir(trashdir))
    except OSError as e:
        # A missing trash directory just means nothing is pending. Any
        # other OSError (e.g. EACCES) previously left the counter unbound
        # and surfaced as UnboundLocalError at the return below; re-raise
        # so callers see the real failure instead.
        if e.errno != errno.ENOENT:
            raise
        num_pending_subvol_del = 0

    return {'pending_subvolume_deletions': num_pending_subvol_del}
259
260
@contextmanager
def open_volume(vc, volname):
    """
    Open a volume for exclusive access; use as a context manager.

    Serializes access via the global volume lock and translates cephfs
    connection failures into VolumeException.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    global_lock = GlobalLock()
    with global_lock.lock_op():
        try:
            with open_filesystem(vc, volname) as fs:
                yield fs
        except CephfsConnectionException as ce:
            raise VolumeException(ce.errno, ce.error_str)
278
279
@contextmanager
def open_volume_lockless(vc, volname):
    """
    Open a volume with shared (lock-free) access; use as a context manager.

    Like open_volume() but without taking the global volume lock. Cephfs
    connection failures are translated into VolumeException.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    try:
        with open_filesystem(vc, volname) as fs:
            yield fs
    except CephfsConnectionException as ce:
        raise VolumeException(ce.errno, ce.error_str)