5 from typing
import List
, Tuple
7 from contextlib
import contextmanager
11 from .lock
import GlobalLock
12 from ..exception
import VolumeException
13 from ..fs_util
import create_pool
, remove_pool
, rename_pool
, create_filesystem
, \
14 remove_filesystem
, rename_filesystem
, create_mds
, volume_exists
15 from .trash
import Trash
16 from mgr_util
import open_filesystem
, CephfsConnectionException
# Module-level logger; child of the mgr module's logger hierarchy.
log = logging.getLogger(__name__)
def gen_pool_names(volname):
    """
    Return metadata and data pool name (from a filesystem/volume name) as a tuple.

    :param volname: volume (file system) name
    :return: ("cephfs.<volname>.meta", "cephfs.<volname>.data")
    """
    return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
def get_mds_map(mgr, volname):
    """
    Return the mdsmap for a volname, or None if no file system matches.

    :param mgr: mgr module handle (only its ``get("fs_map")`` is used)
    :param volname: volume (file system) name
    :return: the matching ``mdsmap`` dict, or None
    """
    fs_map = mgr.get("fs_map")
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            # found the file system; hand back its mdsmap as-is
            return f['mdsmap']
    return None
def get_pool_names(mgr, volname):
    """
    Return metadata and data pools (list) names of volume as a tuple.

    :param mgr: mgr module handle (uses ``get("fs_map")`` and ``get("osd_map")``)
    :param volname: volume (file system) name
    :return: (metadata_pool_name, [data_pool_name, ...]), or (None, None)
             if no file system named volname exists
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        # volume not found
        return None, None
    # map pool ids to pool names via the osd map
    osdmap = mgr.get("osd_map")
    pools = {p['pool']: p['pool_name'] for p in osdmap['pools']}
    metadata_pool = pools[metadata_pool_id]
    data_pools = [pools[pool_id] for pool_id in data_pool_ids]
    return metadata_pool, data_pools
def get_pool_ids(mgr, volname):
    """
    Return metadata and data pools (list) id of volume as a tuple.

    :param mgr: mgr module handle (only its ``get("fs_map")`` is used)
    :param volname: volume (file system) name
    :return: (metadata_pool_id, [data_pool_id, ...]), or (None, []) if no
             file system named volname exists
    """
    fs_map = mgr.get("fs_map")
    metadata_pool_id = None
    data_pool_ids = []  # type: List[int]
    for f in fs_map['filesystems']:
        if volname == f['mdsmap']['fs_name']:
            metadata_pool_id = f['mdsmap']['metadata_pool']
            data_pool_ids = f['mdsmap']['data_pools']
            break
    if metadata_pool_id is None:
        # volume not found; data_pool_ids is still the empty default
        return None, data_pool_ids
    return metadata_pool_id, data_pool_ids
def create_volume(mgr, volname, placement):
    """
    Create a volume (pool, filesystem and mds).

    Pools created earlier in the sequence are removed again if a later
    step fails, so a failed create leaves no partial state behind.

    :param mgr: mgr module handle
    :param volname: volume (file system) name
    :param placement: orchestrator placement spec for the MDS service
    :return: (return_code, stdout, stderr) tuple
    """
    metadata_pool, data_pool = gen_pool_names(volname)
    # create pools
    r, outb, outs = create_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs
    r, outb, outs = create_pool(mgr, data_pool)
    if r != 0:
        # cleanup: undo the metadata pool created above
        remove_pool(mgr, metadata_pool)
        return r, outb, outs
    # create filesystem
    r, outb, outs = create_filesystem(mgr, volname, metadata_pool, data_pool)
    if r != 0:
        log.error("Filesystem creation error: {0} {1} {2}".format(r, outb, outs))
        # cleanup: undo both pools created above
        remove_pool(mgr, data_pool)
        remove_pool(mgr, metadata_pool)
        return r, outb, outs
    # create mds service for the new file system
    return create_mds(mgr, volname, placement)
def delete_volume(mgr, volname, metadata_pool, data_pools):
    """
    Delete the given volume (tear down mds, remove filesystem, remove pools).

    :param mgr: mgr module handle
    :param volname: volume (file system) name
    :param metadata_pool: name of the volume's metadata pool
    :param data_pools: list of the volume's data pool names
    :return: (return_code, stdout, stderr) tuple
    """
    # Tear down MDS daemons via the orchestrator, when one is available.
    try:
        completion = mgr.remove_service('mds.' + volname)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("OrchestratorError, not tearing down MDS daemons")
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to tear down MDS daemons")
        return -errno.EINVAL, "", str(e)

    # In case orchestrator didn't tear down MDS daemons cleanly, or
    # there was no orchestrator, we force the daemons down.
    if volume_exists(mgr, volname):
        r, outb, outs = remove_filesystem(mgr, volname)
        if r != 0:
            return r, outb, outs
    else:
        err = "Filesystem not found for volume '{0}'".format(volname)
        log.warning(err)
        return -errno.ENOENT, "", err

    r, outb, outs = remove_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs

    for data_pool in data_pools:
        r, outb, outs = remove_pool(mgr, data_pool)
        if r != 0:
            return r, outb, outs
    result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
    return r, result_str, ""
def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
    """
    Rename volume (orch MDS service, file system, pools).

    :param mgr: mgr module handle
    :param volname: current volume (file system) name
    :param newvolname: new volume (file system) name
    :return: (return_code, stdout, stderr) tuple
    """
    # To allow volume rename to be idempotent, check whether orch managed MDS
    # service is already renamed. If so, skip renaming MDS service.
    rename_mds_service = True
    # Initialize so the ImportError/OrchestratorError path below cannot leave
    # 'completion' unbound when it is inspected afterwards.
    completion = None
    try:
        completion = mgr.describe_service(
            service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("Failed to fetch orch service mds.%s", newvolname)
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to fetch orch service mds.%s", newvolname)
        return -errno.EINVAL, "", str(e)
    if completion and completion.result:
        rename_mds_service = False

    # Launch new MDS service matching newvolname
    completion = None
    remove_mds_service = False
    if rename_mds_service:
        try:
            completion = mgr.describe_service(
                service_type='mds', service_name=f"mds.{volname}", refresh=True)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to fetch orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to fetch orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)
        if completion and completion.result:
            svc = completion.result[0]
            placement = svc.spec.placement.pretty_str()
            create_mds(mgr, newvolname, placement)
            remove_mds_service = True

    # rename_filesystem is idempotent
    r, outb, outs = rename_filesystem(mgr, volname, newvolname)
    if r != 0:
        errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
        log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
        outs = f'{errmsg}; {outs}'
        return r, "", outs

    # Rename file system's metadata and data pools
    metadata_pool, data_pools = get_pool_names(mgr, newvolname)

    new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
    if metadata_pool != new_metadata_pool:
        r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
        if r != 0:
            errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
            log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
            outs = f'{errmsg}; {outs}'
            return r, "", outs

    data_pool_rename_failed = False
    # If file system has more than one data pool, then skip renaming
    # the data pools, and proceed to remove the old MDS service.
    if len(data_pools) > 1:
        data_pool_rename_failed = True
    else:
        data_pool = data_pools[0]
        if data_pool != new_data_pool:
            r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
            if r != 0:
                errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
                log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
                outs = f'{errmsg}; {outs}'
                return r, "", outs

    # Tear down old MDS service
    if remove_mds_service:
        try:
            completion = mgr.remove_service('mds.' + volname)
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("Failed to tear down orch service mds.%s", volname)
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to tear down orch service mds.%s", volname)
            return -errno.EINVAL, "", str(e)

    outb = f"FS volume '{volname}' renamed to '{newvolname}'"
    if data_pool_rename_failed:
        outb += ". But failed to rename data pools as more than one data pool was found."
    return r, outb, ""
def list_volumes(mgr):
    """
    List all filesystem volumes.

    :param mgr: mgr module handle (only its ``get("fs_map")`` is used)
    :return: list of dicts of the form {'name': <fs_name>}
    """
    fs_map = mgr.get("fs_map")
    # one entry per file system currently in the fs map
    result = [{'name': f['mdsmap']['fs_name']} for f in fs_map['filesystems']]
    return result
def get_pending_subvol_deletions_count(path):
    """
    Get the number of pending subvolumes deletions.

    Counts entries in the trash directory under *path*.

    :param path: base filesystem path containing the trash group directory
    :return: dict with key 'pending_subvolume_deletions'
    """
    trashdir = os.path.join(path, Trash.GROUP_NAME)
    try:
        num_pending_subvol_del = len(os.listdir(trashdir))
    # NOTE(review): the except clause is reconstructed as OSError based on the
    # 'e.errno == errno.ENOENT' check — confirm against the original source.
    except OSError as e:
        if e.errno == errno.ENOENT:
            # no trash dir yet means nothing is pending deletion
            num_pending_subvol_del = 0
        else:
            # bug fix: other OSErrors previously fell through with the
            # counter unbound (NameError at return); propagate them instead
            raise

    return {'pending_subvolume_deletions': num_pending_subvol_del}
@contextmanager
def open_volume(vc, volname):
    """
    Open a volume for exclusive access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    g_lock = GlobalLock()
    # hold the global lock for the lifetime of the open filesystem handle
    with g_lock.lock_op():
        try:
            with open_filesystem(vc, volname) as fs_handle:
                yield fs_handle
        except CephfsConnectionException as ce:
            # normalize connection failures to the volumes exception type
            raise VolumeException(ce.errno, ce.error_str)
@contextmanager
def open_volume_lockless(vc, volname):
    """
    Open a volume with shared access. This API is to be used as a context
    manager.

    :param vc: volume client instance
    :param volname: volume name
    :return: yields a volume handle (ceph filesystem handle)
    """
    try:
        with open_filesystem(vc, volname) as fs_handle:
            yield fs_handle
    except CephfsConnectionException as ce:
        # normalize connection failures to the volumes exception type
        raise VolumeException(ce.errno, ce.error_str)