]> git.proxmox.com Git - ceph.git/blob - ceph/src/pybind/mgr/volumes/fs/volume.py
import quincy beta 17.1.0
[ceph.git] / ceph / src / pybind / mgr / volumes / fs / volume.py
1 import json
2 import errno
3 import logging
4 from typing import TYPE_CHECKING
5
6 import cephfs
7
8 from mgr_util import CephfsClient
9
10 from .fs_util import listdir
11
12 from .operations.volume import create_volume, \
13 delete_volume, list_volumes, open_volume, get_pool_names
14 from .operations.group import open_group, create_group, remove_group, open_group_unique
15 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
16 create_clone
17
18 from .vol_spec import VolSpec
19 from .exception import VolumeException, ClusterError, ClusterTimeout, EvictionError
20 from .async_cloner import Cloner
21 from .purge_queue import ThreadPoolPurgeQueueMixin
22 from .operations.template import SubvolumeOpType
23
24 if TYPE_CHECKING:
25 from volumes import Module
26
27 log = logging.getLogger(__name__)
28
29 ALLOWED_ACCESS_LEVELS = ('r', 'rw')
30
31
def octal_str_to_decimal_int(mode):
    """
    Parse an octal mode string (e.g. "755") and return its integer value.

    Raises VolumeException(EINVAL) when the string is not valid octal.
    """
    try:
        parsed = int(mode, 8)
    except ValueError:
        raise VolumeException(-errno.EINVAL, "Invalid mode '{0}'".format(mode))
    return parsed
37
38
def name_to_json(names):
    """
    Convert a list of byte-string names to a pretty-printed JSON array of
    {'name': <str>} entries.

    :param names: iterable of bytes objects (e.g. dirents from libcephfs)
    :return: JSON string, indented and key-sorted for stable output
    """
    # decode each entry up front; a comprehension replaces the index-based loop
    namedict = [{'name': name.decode('utf-8')} for name in names]
    return json.dumps(namedict, indent=4, sort_keys=True)
47
48
49 class VolumeClient(CephfsClient["Module"]):
    def __init__(self, mgr):
        """
        Initialize the volume client: build the volume spec, the async
        cloner and the purge queue, then seed both queues with jobs for
        every filesystem currently in the FSMap.
        """
        super().__init__(mgr)
        # volume specification
        self.volspec = VolSpec(mgr.rados.conf_get('client_snapdir'))
        # async clone machinery, bounded by the module's configured limits
        self.cloner = Cloner(self, self.mgr.max_concurrent_clones, self.mgr.snapshot_clone_delay)
        self.purge_queue = ThreadPoolPurgeQueueMixin(self, 4)
        # on startup, queue purge job for available volumes to kickstart
        # purge for leftover subvolume entries in trash. note that, if the
        # trash directory does not exist or if there are no purge entries
        # available for a volume, the volume is removed from the purge
        # job list.
        fs_map = self.mgr.get('fs_map')
        for fs in fs_map['filesystems']:
            # clone jobs are queued as well, to resume any pending clones
            self.cloner.queue_job(fs['mdsmap']['fs_name'])
            self.purge_queue.queue_job(fs['mdsmap']['fs_name'])
65
    def shutdown(self):
        """
        Stop all background machinery in a safe order: mark the client as
        stopping, wind down clone and purge workers, then drop every
        libcephfs connection.
        """
        # Overrides CephfsClient.shutdown()
        log.info("shutting down")
        # first, note that we're shutting down
        self.stopping.set()
        # stop clones
        self.cloner.shutdown()
        # stop purge threads
        self.purge_queue.shutdown()
        # last, delete all libcephfs handles from connection pool
        self.connection_pool.del_all_connections()
77
78 def cluster_log(self, msg, lvl=None):
79 """
80 log to cluster log with default log level as WARN.
81 """
82 if not lvl:
83 lvl = self.mgr.ClusterLogPrio.WARN
84 self.mgr.cluster_log("cluster", lvl, msg)
85
    def volume_exception_to_retval(self, ve):
        """
        Convert a VolumeException into the (retval, stdout, stderr) tuple
        used as a mgr command result.
        """
        return ve.to_tuple()
91
92 ### volume operations -- create, rm, ls
93
94 def create_fs_volume(self, volname, placement):
95 if self.is_stopping():
96 return -errno.ESHUTDOWN, "", "shutdown in progress"
97 return create_volume(self.mgr, volname, placement)
98
    def delete_fs_volume(self, volname, confirm):
        """
        Destroy a volume and its pools.

        Requires the literal confirmation string, and requires the
        `mon_allow_pool_delete` option to be enabled cluster-wide; pending
        purge jobs and pooled connections for the volume are torn down
        before deletion.

        :return: (retval, stdout, stderr) triple
        """
        if self.is_stopping():
            return -errno.ESHUTDOWN, "", "shutdown in progress"

        # destructive operation -- insist on the explicit sentinel argument
        if confirm != "--yes-i-really-mean-it":
            return -errno.EPERM, "", "WARNING: this will *PERMANENTLY DESTROY* all data " \
                "stored in the filesystem '{0}'. If you are *ABSOLUTELY CERTAIN* " \
                "that is what you want, re-issue the command followed by " \
                "--yes-i-really-mean-it.".format(volname)

        # pool deletion must be allowed cluster-wide, otherwise the volume
        # removal would fail midway
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config get',
            'key': 'mon_allow_pool_delete',
            'who': 'mon',
            'format': 'json',
        })
        mon_allow_pool_delete = json.loads(out)
        if not mon_allow_pool_delete:
            return -errno.EPERM, "", "pool deletion is disabled; you must first " \
                "set the mon_allow_pool_delete config option to true before volumes " \
                "can be deleted"

        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
        if not metadata_pool:
            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
        # stop background work and drop cached handles before deleting
        self.purge_queue.cancel_jobs(volname)
        self.connection_pool.del_connections(volname, wait=True)
        return delete_volume(self.mgr, volname, metadata_pool, data_pools)
127
128 def list_fs_volumes(self):
129 if self.stopping.is_set():
130 return -errno.ESHUTDOWN, "", "shutdown in progress"
131 volumes = list_volumes(self.mgr)
132 return 0, json.dumps(volumes, indent=4, sort_keys=True), ""
133
134 ### subvolume operations
135
    def _create_subvolume(self, fs_handle, volname, group, subvolname, **kwargs):
        """
        Create a fresh subvolume from the create_subvolume kwargs.

        On failure the purge queue is kicked, since a partially created
        subvolume is expected to land in the trashcan; the exception is
        then re-raised to the caller.
        """
        size = kwargs['size']
        pool = kwargs['pool_layout']
        uid = kwargs['uid']
        gid = kwargs['gid']
        mode = kwargs['mode']
        isolate_nspace = kwargs['namespace_isolated']

        # validate/convert the octal mode string before touching the fs
        oct_mode = octal_str_to_decimal_int(mode)
        try:
            create_subvol(
                self.mgr, fs_handle, self.volspec, group, subvolname, size, isolate_nspace, pool, oct_mode, uid, gid)
        except VolumeException as ve:
            # kick the purge threads for async removal -- note that this
            # assumes that the subvolume is moved to trashcan for cleanup on error.
            self.purge_queue.queue_job(volname)
            raise ve
153
    def create_subvolume(self, **kwargs):
        """
        Create a subvolume, or update attributes of an existing one.

        Creation is idempotent: if the subvolume already exists the
        requested uid/gid/pool/namespace/quota attributes are (re)applied
        instead of failing; ENOENT from the open probe triggers the actual
        creation via _create_subvolume().

        :return: (retval, stdout, stderr) triple
        """
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']
        size = kwargs['size']
        pool = kwargs['pool_layout']
        uid = kwargs['uid']
        gid = kwargs['gid']
        isolate_nspace = kwargs['namespace_isolated']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    try:
                        with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.CREATE) as subvolume:
                            # idempotent creation -- valid. Attributes set is supported.
                            attrs = {
                                'uid': uid if uid else subvolume.uid,
                                'gid': gid if gid else subvolume.gid,
                                'data_pool': pool,
                                'pool_namespace': subvolume.namespace if isolate_nspace else None,
                                'quota': size
                            }
                            subvolume.set_attrs(subvolume.path, attrs)
                    except VolumeException as ve:
                        # ENOENT: subvolume doesn't exist yet -- create it
                        if ve.errno == -errno.ENOENT:
                            self._create_subvolume(fs_handle, volname, group, subvolname, **kwargs)
                        else:
                            raise
        except VolumeException as ve:
            # volume/group does not exist or subvolume creation failed
            ret = self.volume_exception_to_retval(ve)
        return ret
188
    def remove_subvolume(self, **kwargs):
        """
        Remove a subvolume, optionally retaining its snapshots.

        With force, a missing subvolume (ENOENT) is not an error; EAGAIN
        without force is decorated with a hint to retry using --force.

        :return: (retval, stdout, stderr) triple
        """
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']
        force = kwargs['force']
        retainsnaps = kwargs['retain_snapshots']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    remove_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, force, retainsnaps)
                    # kick the purge threads for async removal -- note that this
                    # assumes that the subvolume is moved to trash can.
                    # TODO: make purge queue as singleton so that trash can kicks
                    # the purge threads on dump.
                    self.purge_queue.queue_job(volname)
        except VolumeException as ve:
            if ve.errno == -errno.EAGAIN and not force:
                ve = VolumeException(ve.errno, ve.error_str + " (use --force to override)")
                ret = self.volume_exception_to_retval(ve)
            elif not (ve.errno == -errno.ENOENT and force):
                ret = self.volume_exception_to_retval(ve)
        return ret
213
    def authorize_subvolume(self, **kwargs):
        """
        Grant an auth ID access to a subvolume and return its cephx key.

        :return: (0, key, "") on success; error triple on failure
        """
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        authid = kwargs['auth_id']
        groupname = kwargs['group_name']
        accesslevel = kwargs['access_level']
        tenant_id = kwargs['tenant_id']
        allow_existing_id = kwargs['allow_existing_id']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.ALLOW_ACCESS) as subvolume:
                        # returns the secret key for the (possibly new) auth id
                        key = subvolume.authorize(authid, accesslevel, tenant_id, allow_existing_id)
                        ret = 0, key, ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
233
234 def deauthorize_subvolume(self, **kwargs):
235 ret = 0, "", ""
236 volname = kwargs['vol_name']
237 subvolname = kwargs['sub_name']
238 authid = kwargs['auth_id']
239 groupname = kwargs['group_name']
240
241 try:
242 with open_volume(self, volname) as fs_handle:
243 with open_group(fs_handle, self.volspec, groupname) as group:
244 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.DENY_ACCESS) as subvolume:
245 subvolume.deauthorize(authid)
246 except VolumeException as ve:
247 ret = self.volume_exception_to_retval(ve)
248 return ret
249
250 def authorized_list(self, **kwargs):
251 ret = 0, "", ""
252 volname = kwargs['vol_name']
253 subvolname = kwargs['sub_name']
254 groupname = kwargs['group_name']
255
256 try:
257 with open_volume(self, volname) as fs_handle:
258 with open_group(fs_handle, self.volspec, groupname) as group:
259 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.AUTH_LIST) as subvolume:
260 auths = subvolume.authorized_list()
261 ret = 0, json.dumps(auths, indent=4, sort_keys=True), ""
262 except VolumeException as ve:
263 ret = self.volume_exception_to_retval(ve)
264 return ret
265
266 def evict(self, **kwargs):
267 ret = 0, "", ""
268 volname = kwargs['vol_name']
269 subvolname = kwargs['sub_name']
270 authid = kwargs['auth_id']
271 groupname = kwargs['group_name']
272
273 try:
274 with open_volume(self, volname) as fs_handle:
275 with open_group(fs_handle, self.volspec, groupname) as group:
276 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.EVICT) as subvolume:
277 key = subvolume.evict(volname, authid)
278 ret = 0, "", ""
279 except (VolumeException, ClusterTimeout, ClusterError, EvictionError) as e:
280 if isinstance(e, VolumeException):
281 ret = self.volume_exception_to_retval(e)
282 elif isinstance(e, ClusterTimeout):
283 ret = -errno.ETIMEDOUT , "", "Timedout trying to talk to ceph cluster"
284 elif isinstance(e, ClusterError):
285 ret = e._result_code , "", e._result_str
286 elif isinstance(e, EvictionError):
287 ret = -errno.EINVAL, "", str(e)
288 return ret
289
    def resize_subvolume(self, **kwargs):
        """
        Resize a subvolume's quota, optionally refusing to shrink.

        :return: (0, json, "") with bytes_used/bytes_quota/bytes_pcent on
                 success; error triple on failure
        """
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        newsize = kwargs['new_size']
        noshrink = kwargs['no_shrink']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.RESIZE) as subvolume:
                        nsize, usedbytes = subvolume.resize(newsize, noshrink)
                        # nsize == 0 means "no quota", so percentage is undefined
                        ret = 0, json.dumps(
                            [{'bytes_used': usedbytes},{'bytes_quota': nsize},
                             {'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0)}],
                            indent=4, sort_keys=True), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
310
311 def subvolume_pin(self, **kwargs):
312 ret = 0, "", ""
313 volname = kwargs['vol_name']
314 subvolname = kwargs['sub_name']
315 pin_type = kwargs['pin_type']
316 pin_setting = kwargs['pin_setting']
317 groupname = kwargs['group_name']
318
319 try:
320 with open_volume(self, volname) as fs_handle:
321 with open_group(fs_handle, self.volspec, groupname) as group:
322 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.PIN) as subvolume:
323 subvolume.pin(pin_type, pin_setting)
324 ret = 0, json.dumps({}), ""
325 except VolumeException as ve:
326 ret = self.volume_exception_to_retval(ve)
327 return ret
328
329 def subvolume_getpath(self, **kwargs):
330 ret = None
331 volname = kwargs['vol_name']
332 subvolname = kwargs['sub_name']
333 groupname = kwargs['group_name']
334
335 try:
336 with open_volume(self, volname) as fs_handle:
337 with open_group(fs_handle, self.volspec, groupname) as group:
338 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.GETPATH) as subvolume:
339 subvolpath = subvolume.path
340 ret = 0, subvolpath.decode("utf-8"), ""
341 except VolumeException as ve:
342 ret = self.volume_exception_to_retval(ve)
343 return ret
344
345 def subvolume_info(self, **kwargs):
346 ret = None
347 volname = kwargs['vol_name']
348 subvolname = kwargs['sub_name']
349 groupname = kwargs['group_name']
350
351 try:
352 with open_volume(self, volname) as fs_handle:
353 with open_group(fs_handle, self.volspec, groupname) as group:
354 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.INFO) as subvolume:
355 mon_addr_lst = []
356 mon_map_mons = self.mgr.get('mon_map')['mons']
357 for mon in mon_map_mons:
358 ip_port = mon['addr'].split("/")[0]
359 mon_addr_lst.append(ip_port)
360
361 subvol_info_dict = subvolume.info()
362 subvol_info_dict["mon_addrs"] = mon_addr_lst
363 ret = 0, json.dumps(subvol_info_dict, indent=4, sort_keys=True), ""
364 except VolumeException as ve:
365 ret = self.volume_exception_to_retval(ve)
366 return ret
367
368 def list_subvolumes(self, **kwargs):
369 ret = 0, "", ""
370 volname = kwargs['vol_name']
371 groupname = kwargs['group_name']
372
373 try:
374 with open_volume(self, volname) as fs_handle:
375 with open_group(fs_handle, self.volspec, groupname) as group:
376 subvolumes = group.list_subvolumes()
377 ret = 0, name_to_json(subvolumes), ""
378 except VolumeException as ve:
379 ret = self.volume_exception_to_retval(ve)
380 return ret
381
382 ### subvolume snapshot
383
384 def create_subvolume_snapshot(self, **kwargs):
385 ret = 0, "", ""
386 volname = kwargs['vol_name']
387 subvolname = kwargs['sub_name']
388 snapname = kwargs['snap_name']
389 groupname = kwargs['group_name']
390
391 try:
392 with open_volume(self, volname) as fs_handle:
393 with open_group(fs_handle, self.volspec, groupname) as group:
394 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_CREATE) as subvolume:
395 subvolume.create_snapshot(snapname)
396 except VolumeException as ve:
397 ret = self.volume_exception_to_retval(ve)
398 return ret
399
    def remove_subvolume_snapshot(self, **kwargs):
        """
        Remove a subvolume snapshot.

        ESTALE (subvolume mid-removal) kicks the purge queue instead of
        failing; with force, ENOENT is tolerated.

        :return: (retval, stdout, stderr) triple
        """
        ret = 0, "", ""
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        snapname = kwargs['snap_name']
        groupname = kwargs['group_name']
        force = kwargs['force']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_REMOVE) as subvolume:
                        subvolume.remove_snapshot(snapname)
        except VolumeException as ve:
            # ESTALE serves as an error to state that subvolume is currently stale due to internal removal and,
            # we should tickle the purge jobs to purge the same
            if ve.errno == -errno.ESTALE:
                self.purge_queue.queue_job(volname)
            elif not (ve.errno == -errno.ENOENT and force):
                ret = self.volume_exception_to_retval(ve)
        return ret
421
422 def subvolume_snapshot_info(self, **kwargs):
423 ret = 0, "", ""
424 volname = kwargs['vol_name']
425 subvolname = kwargs['sub_name']
426 snapname = kwargs['snap_name']
427 groupname = kwargs['group_name']
428
429 try:
430 with open_volume(self, volname) as fs_handle:
431 with open_group(fs_handle, self.volspec, groupname) as group:
432 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_INFO) as subvolume:
433 snap_info_dict = subvolume.snapshot_info(snapname)
434 ret = 0, json.dumps(snap_info_dict, indent=4, sort_keys=True), ""
435 except VolumeException as ve:
436 ret = self.volume_exception_to_retval(ve)
437 return ret
438
439 def list_subvolume_snapshots(self, **kwargs):
440 ret = 0, "", ""
441 volname = kwargs['vol_name']
442 subvolname = kwargs['sub_name']
443 groupname = kwargs['group_name']
444
445 try:
446 with open_volume(self, volname) as fs_handle:
447 with open_group(fs_handle, self.volspec, groupname) as group:
448 with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_LIST) as subvolume:
449 snapshots = subvolume.list_snapshots()
450 ret = 0, name_to_json(snapshots), ""
451 except VolumeException as ve:
452 ret = self.volume_exception_to_retval(ve)
453 return ret
454
    def protect_subvolume_snapshot(self, **kwargs):
        """
        Deprecated no-op: snapshot protection is implicit now.

        The subvolume is still opened so that bad volume/group/subvolume
        names keep producing the usual errors; otherwise only a
        deprecation warning is returned.
        """
        ret = 0, "", "Deprecation warning: 'snapshot protect' call is deprecated and will be removed in a future release"
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT) as subvolume:
                        log.warning("snapshot protect call is deprecated and will be removed in a future release")
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
469
    def unprotect_subvolume_snapshot(self, **kwargs):
        """
        Deprecated no-op counterpart of protect_subvolume_snapshot().

        Opens the subvolume only for argument validation and returns a
        deprecation warning.
        """
        ret = 0, "", "Deprecation warning: 'snapshot unprotect' call is deprecated and will be removed in a future release"
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT) as subvolume:
                        log.warning("snapshot unprotect call is deprecated and will be removed in a future release")
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
484
    def _prepare_clone_subvolume(self, fs_handle, volname, s_subvolume, s_snapname, t_group, t_subvolname, **kwargs):
        """
        Create the clone target subvolume, attach the source snapshot to
        it and queue the async clone job.

        If attaching fails, the half-built target is removed and the purge
        queue kicked before re-raising; cleanup failures are only logged.
        """
        t_pool = kwargs['pool_layout']
        s_subvolname = kwargs['sub_name']
        s_groupname = kwargs['group_name']
        t_groupname = kwargs['target_group_name']

        create_clone(self.mgr, fs_handle, self.volspec, t_group, t_subvolname, t_pool, volname, s_subvolume, s_snapname)
        with open_subvol(self.mgr, fs_handle, self.volspec, t_group, t_subvolname, SubvolumeOpType.CLONE_INTERNAL) as t_subvolume:
            try:
                # self-clone (same group and name): attach to the target itself
                if t_groupname == s_groupname and t_subvolname == s_subvolname:
                    t_subvolume.attach_snapshot(s_snapname, t_subvolume)
                else:
                    s_subvolume.attach_snapshot(s_snapname, t_subvolume)
                self.cloner.queue_job(volname)
            except VolumeException as ve:
                # best-effort rollback of the target; original error wins
                try:
                    t_subvolume.remove()
                    self.purge_queue.queue_job(volname)
                except Exception as e:
                    log.warning("failed to cleanup clone subvolume '{0}' ({1})".format(t_subvolname, e))
                raise ve
506
    def _clone_subvolume_snapshot(self, fs_handle, volname, s_group, s_subvolume, **kwargs):
        """
        Validate the clone source snapshot and ensure the target name is
        free, then delegate actual creation to _prepare_clone_subvolume().

        Raises ENOENT when the source snapshot is missing and EEXIST when
        the target subvolume already exists.
        """
        s_snapname = kwargs['snap_name']
        target_subvolname = kwargs['target_sub_name']
        target_groupname = kwargs['target_group_name']
        s_groupname = kwargs['group_name']

        # snapshot names from libcephfs are bytes -- compare accordingly
        if not s_snapname.encode('utf-8') in s_subvolume.list_snapshots():
            raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(s_snapname))

        with open_group_unique(fs_handle, self.volspec, target_groupname, s_group, s_groupname) as target_group:
            try:
                # probe the target: opening successfully means a name clash
                with open_subvol(self.mgr, fs_handle, self.volspec, target_group, target_subvolname, SubvolumeOpType.CLONE_CREATE):
                    raise VolumeException(-errno.EEXIST, "subvolume '{0}' exists".format(target_subvolname))
            except VolumeException as ve:
                if ve.errno == -errno.ENOENT:
                    self._prepare_clone_subvolume(fs_handle, volname, s_subvolume, s_snapname,
                                                  target_group, target_subvolname, **kwargs)
                else:
                    raise
526
527 def clone_subvolume_snapshot(self, **kwargs):
528 ret = 0, "", ""
529 volname = kwargs['vol_name']
530 s_subvolname = kwargs['sub_name']
531 s_groupname = kwargs['group_name']
532
533 try:
534 with open_volume(self, volname) as fs_handle:
535 with open_group(fs_handle, self.volspec, s_groupname) as s_group:
536 with open_subvol(self.mgr, fs_handle, self.volspec, s_group, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
537 self._clone_subvolume_snapshot(fs_handle, volname, s_group, s_subvolume, **kwargs)
538 except VolumeException as ve:
539 ret = self.volume_exception_to_retval(ve)
540 return ret
541
542 def clone_status(self, **kwargs):
543 ret = 0, "", ""
544 volname = kwargs['vol_name']
545 clonename = kwargs['clone_name']
546 groupname = kwargs['group_name']
547
548 try:
549 with open_volume(self, volname) as fs_handle:
550 with open_group(fs_handle, self.volspec, groupname) as group:
551 with open_subvol(self.mgr, fs_handle, self.volspec, group, clonename, SubvolumeOpType.CLONE_STATUS) as subvolume:
552 ret = 0, json.dumps({'status' : subvolume.status}, indent=2), ""
553 except VolumeException as ve:
554 ret = self.volume_exception_to_retval(ve)
555 return ret
556
557 def clone_cancel(self, **kwargs):
558 ret = 0, "", ""
559 volname = kwargs['vol_name']
560 clonename = kwargs['clone_name']
561 groupname = kwargs['group_name']
562
563 try:
564 self.cloner.cancel_job(volname, (clonename, groupname))
565 except VolumeException as ve:
566 ret = self.volume_exception_to_retval(ve)
567 return ret
568
569 ### group operations
570
    def create_subvolume_group(self, **kwargs):
        """
        Create a subvolume group; creation is idempotent.

        An existing group is accepted silently; ENOENT from the open probe
        triggers the actual creation with the requested pool/mode/uid/gid.

        :return: (retval, stdout, stderr) triple
        """
        ret = 0, "", ""
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        pool = kwargs['pool_layout']
        uid = kwargs['uid']
        gid = kwargs['gid']
        mode = kwargs['mode']

        try:
            with open_volume(self, volname) as fs_handle:
                try:
                    with open_group(fs_handle, self.volspec, groupname):
                        # idempotent creation -- valid.
                        pass
                except VolumeException as ve:
                    if ve.errno == -errno.ENOENT:
                        # validate mode string before creating
                        oct_mode = octal_str_to_decimal_int(mode)
                        create_group(fs_handle, self.volspec, groupname, pool, oct_mode, uid, gid)
                    else:
                        raise
        except VolumeException as ve:
            # volume does not exist or subvolume group creation failed
            ret = self.volume_exception_to_retval(ve)
        return ret
596
597 def remove_subvolume_group(self, **kwargs):
598 ret = 0, "", ""
599 volname = kwargs['vol_name']
600 groupname = kwargs['group_name']
601 force = kwargs['force']
602
603 try:
604 with open_volume(self, volname) as fs_handle:
605 remove_group(fs_handle, self.volspec, groupname)
606 except VolumeException as ve:
607 if not (ve.errno == -errno.ENOENT and force):
608 ret = self.volume_exception_to_retval(ve)
609 return ret
610
611 def getpath_subvolume_group(self, **kwargs):
612 volname = kwargs['vol_name']
613 groupname = kwargs['group_name']
614
615 try:
616 with open_volume(self, volname) as fs_handle:
617 with open_group(fs_handle, self.volspec, groupname) as group:
618 return 0, group.path.decode('utf-8'), ""
619 except VolumeException as ve:
620 return self.volume_exception_to_retval(ve)
621
    def list_subvolume_groups(self, **kwargs):
        """
        List subvolume groups of a volume as JSON.

        A missing base dir (no groups created yet) is not an error: ENOENT
        raised after the volume was successfully opened yields the '[]'
        default; failing to open the volume itself is still reported.

        :return: (retval, stdout, stderr) triple
        """
        volname = kwargs['vol_name']
        ret = 0, '[]', ""
        # distinguishes "volume missing" from "group dir missing"
        volume_exists = False
        try:
            with open_volume(self, volname) as fs_handle:
                volume_exists = True
                groups = listdir(fs_handle, self.volspec.base_dir)
                ret = 0, name_to_json(groups), ""
        except VolumeException as ve:
            if not ve.errno == -errno.ENOENT or not volume_exists:
                ret = self.volume_exception_to_retval(ve)
        return ret
635
636 def pin_subvolume_group(self, **kwargs):
637 ret = 0, "", ""
638 volname = kwargs['vol_name']
639 groupname = kwargs['group_name']
640 pin_type = kwargs['pin_type']
641 pin_setting = kwargs['pin_setting']
642
643 try:
644 with open_volume(self, volname) as fs_handle:
645 with open_group(fs_handle, self.volspec, groupname) as group:
646 group.pin(pin_type, pin_setting)
647 ret = 0, json.dumps({}), ""
648 except VolumeException as ve:
649 ret = self.volume_exception_to_retval(ve)
650 return ret
651
652 ### group snapshot
653
    def create_subvolume_group_snapshot(self, **kwargs):
        """
        Intentionally unsupported: always returns ENOSYS.

        The group is still opened so that bad volume/group names produce
        their usual errors; the snapshot call itself is disabled (see the
        tracker link below).
        """
        ret = -errno.ENOSYS, "", "subvolume group snapshots are not supported"
        volname = kwargs['vol_name']
        groupname = kwargs['group_name']
        # snapname = kwargs['snap_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    # as subvolumes are marked with the vxattr ceph.dir.subvolume deny snapshots
                    # at the subvolume group (see: https://tracker.ceph.com/issues/46074)
                    # group.create_snapshot(snapname)
                    pass
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret
670
671 def remove_subvolume_group_snapshot(self, **kwargs):
672 ret = 0, "", ""
673 volname = kwargs['vol_name']
674 groupname = kwargs['group_name']
675 snapname = kwargs['snap_name']
676 force = kwargs['force']
677
678 try:
679 with open_volume(self, volname) as fs_handle:
680 with open_group(fs_handle, self.volspec, groupname) as group:
681 group.remove_snapshot(snapname)
682 except VolumeException as ve:
683 if not (ve.errno == -errno.ENOENT and force):
684 ret = self.volume_exception_to_retval(ve)
685 return ret
686
687 def list_subvolume_group_snapshots(self, **kwargs):
688 ret = 0, "", ""
689 volname = kwargs['vol_name']
690 groupname = kwargs['group_name']
691
692 try:
693 with open_volume(self, volname) as fs_handle:
694 with open_group(fs_handle, self.volspec, groupname) as group:
695 snapshots = group.list_snapshots()
696 ret = 0, name_to_json(snapshots), ""
697 except VolumeException as ve:
698 ret = self.volume_exception_to_retval(ve)
699 return ret