5 from typing
import List
, Tuple
7 from contextlib
import contextmanager
11 from .lock
import GlobalLock
12 from ..exception
import VolumeException
13 from ..fs_util
import create_pool
, remove_pool
, rename_pool
, create_filesystem
, \
14 remove_filesystem
, rename_filesystem
, create_mds
, volume_exists
, listdir
15 from .trash
import Trash
16 from mgr_util
import open_filesystem
, CephfsConnectionException
# Module-level logger, named after this module per stdlib logging convention.
log = logging.getLogger(__name__)
def gen_pool_names(volname):
    """
    Return metadata and data pool name (from a filesystem/volume name) as a tuple.

    :param volname: volume (file system) name
    :return: tuple of (metadata pool name, data pool name)
    """
    return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
def get_mds_map(mgr, volname):
    """
    Return the mdsmap for a volname, or None when no file system matches.

    :param mgr: mgr module handle (must provide ``get("fs_map")``)
    :param volname: volume (file system) name to look up
    :return: the matching ``mdsmap`` dict, or None if not found
    """
    fs_map = mgr.get("fs_map")
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            return f['mdsmap']
    # no file system with that name
    return None
def get_pool_names(mgr, volname):
    """
    Return metadata and data pool (list) names of volume as a tuple.

    :param mgr: mgr module handle (must provide ``get("fs_map")`` and ``get("osd_map")``)
    :param volname: volume (file system) name
    :return: (metadata pool name, list of data pool names), or (None, None)
             when no file system matches volname
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        return None, None
    osdmap = mgr.get("osd_map")
    # map pool id -> pool name; comprehension instead of dict([...]), and a
    # named loop variable instead of shadowing the builtin `id`
    pools = {p['pool']: p['pool_name'] for p in osdmap['pools']}
    metadata_pool = pools[metadata_pool_id]
    data_pools = [pools[pool_id] for pool_id in data_pool_ids]
    return metadata_pool, data_pools
def get_pool_ids(mgr, volname):
    """
    Return metadata and data pool (list) ids of volume as a tuple.

    :param mgr: mgr module handle (must provide ``get("fs_map")``)
    :param volname: volume (file system) name
    :return: (metadata pool id, list of data pool ids), or (None, None)
             when no file system matches volname
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        return None, None
    return metadata_pool_id, data_pool_ids
def create_volume(mgr, volname, placement):
    """
    Create a volume (pools, filesystem and MDS service).

    :param mgr: mgr module handle
    :param volname: volume (file system) name
    :param placement: orchestrator placement spec for the MDS service
    :return: (return code, stdout, stderr) tuple; pools already created are
             cleaned up on failure of a later step
    """
    metadata_pool, data_pool = gen_pool_names(volname)
    # create pools
    r, outb, outs = create_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs
    # default to a bulk pool for data. In case autoscaling has been disabled
    # for the cluster with `ceph osd pool set noautoscale`, this will have no effect.
    r, outb, outs = create_pool(mgr, data_pool, bulk=True)
    if r != 0:
        # cleanup the metadata pool created above
        remove_pool(mgr, metadata_pool)
        return r, outb, outs
    # create the file system on top of the two pools
    r, outb, outs = create_filesystem(mgr, volname, metadata_pool, data_pool)
    if r != 0:
        log.error("Filesystem creation error: {0} {1} {2}".format(r, outb, outs))
        # cleanup both pools
        remove_pool(mgr, data_pool)
        remove_pool(mgr, metadata_pool)
        return r, outb, outs
    return create_mds(mgr, volname, placement)
def delete_volume(mgr, volname, metadata_pool, data_pools):
    """
    Delete the given volume (tear down MDS, remove filesystem, remove pools).

    :param mgr: mgr module handle
    :param volname: volume (file system) name
    :param metadata_pool: name of the metadata pool to remove
    :param data_pools: list of data pool names to remove
    :return: (return code, stdout, stderr) tuple
    """
    # Tear down MDS daemons through the orchestrator, best-effort.
    try:
        completion = mgr.remove_service('mds.' + volname)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("OrchestratorError, not tearing down MDS daemons")
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to tear down MDS daemons")
        return -errno.EINVAL, "", str(e)

    # In case orchestrator didn't tear down MDS daemons cleanly, or
    # there was no orchestrator, we force the daemons down.
    if volume_exists(mgr, volname):
        r, outb, outs = remove_filesystem(mgr, volname)
        if r != 0:
            return r, outb, outs
    else:
        err = "Filesystem not found for volume '{0}'".format(volname)
        log.warning(err)
        return -errno.ENOENT, "", err

    r, outb, outs = remove_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs

    for data_pool in data_pools:
        r, outb, outs = remove_pool(mgr, data_pool)
        if r != 0:
            return r, outb, outs

    result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
    return r, result_str, ""
def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
    """
    Rename a volume (orch MDS service, file system, pools).

    :param mgr: mgr module handle
    :param volname: current volume (file system) name
    :param newvolname: new volume (file system) name
    :return: (return code, stdout, stderr) tuple
    """
    # To allow volume rename to be idempotent, check whether orch managed MDS
    # service is already renamed. If so, skip renaming MDS service.
    rename_mds_service = True
    # initialize before the try so the `if completion` check below cannot hit
    # an unbound name when describe_service raises before assignment
    completion = None
    try:
        completion = mgr.describe_service(
            service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("Failed to fetch orch service mds.%s", newvolname)
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to fetch orch service mds.%s", newvolname)
        return -errno.EINVAL, "", str(e)
    if completion and completion.result:
        rename_mds_service = False

    # Launch new MDS service matching newvolname
    remove_mds_service = False
    if rename_mds_service:
        completion = None
        try:
            completion = mgr.describe_service(
                service_type='mds', service_name=f"mds.{volname}", refresh=True)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to fetch orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to fetch orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)
        if completion and completion.result:
            svc = completion.result[0]
            placement = svc.spec.placement.pretty_str()
            create_mds(mgr, newvolname, placement)
            remove_mds_service = True

    # rename_filesytem is idempotent
    r, outb, outs = rename_filesystem(mgr, volname, newvolname)
    if r != 0:
        errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
        log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
        outs = f'{errmsg}; {outs}'
        return r, outb, outs

    # Rename file system's metadata and data pools
    metadata_pool, data_pools = get_pool_names(mgr, newvolname)

    new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
    if metadata_pool != new_metadata_pool:
        r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
        if r != 0:
            errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
            log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
            outs = f'{errmsg}; {outs}'
            return r, outb, outs

    data_pool_rename_failed = False
    # If file system has more than one data pool, then skip renaming
    # the data pools, and proceed to remove the old MDS service.
    if len(data_pools) > 1:
        data_pool_rename_failed = True
    else:
        data_pool = data_pools[0]
        if data_pool != new_data_pool:
            r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
            if r != 0:
                errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
                log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
                outs = f'{errmsg}; {outs}'
                return r, outb, outs

    # Tear down old MDS service
    if remove_mds_service:
        try:
            completion = mgr.remove_service('mds.' + volname)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to tear down orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to tear down orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)

    outb = f"FS volume '{volname}' renamed to '{newvolname}'"
    if data_pool_rename_failed:
        outb += ". But failed to rename data pools as more than one data pool was found."

    return r, outb, ""
def list_volumes(mgr):
    """
    List all filesystem volumes.

    :param mgr: mgr module handle (must provide ``get("fs_map")``)
    :return: list of ``{'name': <fs_name>}`` dicts, one per file system
    """
    fs_map = mgr.get("fs_map")
    return [{'name': f['mdsmap']['fs_name']} for f in fs_map['filesystems']]
def get_pending_subvol_deletions_count(fs, path):
    """
    Get the number of pending subvolume deletions.

    :param fs: ceph filesystem handle
    :param path: volume path from which the trash dir is derived
    :return: dict with key 'pending_subvolume_deletions'
    """
    trashdir = os.path.join(path, Trash.GROUP_NAME)
    try:
        num_pending_subvol_del = len(listdir(fs, trashdir, filter_entries=None, filter_files=False))
    except VolumeException as ve:
        if ve.errno == -errno.ENOENT:
            # no trash directory yet => nothing pending
            num_pending_subvol_del = 0
        else:
            # any other error must not be swallowed: previously the exception
            # fell through here, leaving num_pending_subvol_del unbound and
            # raising NameError at the return below
            raise

    return {'pending_subvolume_deletions': num_pending_subvol_del}
@contextmanager
def open_volume(vc, volname):
    """
    Open a volume for exclusive access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    # serialize access across volume operations via the global lock
    g_lock = GlobalLock()
    with g_lock.lock_op():
        try:
            with open_filesystem(vc, volname) as fs_handle:
                yield fs_handle
        except CephfsConnectionException as ce:
            # surface connection failures as the module's own exception type
            raise VolumeException(ce.errno, ce.error_str)
@contextmanager
def open_volume_lockless(vc, volname):
    """
    Open a volume with shared access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    try:
        with open_filesystem(vc, volname) as fs_handle:
            yield fs_handle
    except CephfsConnectionException as ce:
        # surface connection failures as the module's own exception type
        raise VolumeException(ce.errno, ce.error_str)