import os
import json
import time
import errno
import random
import logging
import collections
import uuid
import unittest
from hashlib import md5
from textwrap import dedent
from io import StringIO

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)

class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)

    def _raw_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)

    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

    def _get_clone_status(self, clone, clone_group=None):
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        result = json.loads(self._fs_cmd(*args))
        return result

    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _wait_for_clone_to_be_in_progress(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("in-progress", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        self.__check_clone_state("canceled", clone, clone_group, timo=1)
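
    # Note: "complete", "failed", "in-progress" and "canceled" above are the
    # clone state names as reported in the JSON output of "fs clone status",
    # which __check_clone_state() polls once per second up to the timeout.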

    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        if source_version == 2:
            # v2
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)
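
    # Path shapes handled above (inferred from this helper's logic, not an
    # authoritative spec): a v2 snapshot resolves to
    #     volumes/<group>/<subvolume>/.snap/<snapshot>/<uuid>
    # (".snap" sits on the subvolume base directory, above the uuid data
    # directory), while a v1 snapshot resolves to
    #     <subvolume getpath result>/.snap/<snapshot>
    # (".snap" sits directly on the subvolume data directory).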

    def _verify_clone_attrs(self, source_path, clone_path):
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type
            sval = int(self.mount_a.run_shell(['stat', '-c', '%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c', '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c', '%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c', '%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps
            # do not check atime, as the kernel client generally does not update it
            # the way ceph-fuse does.
            sval = int(self.mount_a.run_shell(['stat', '-c', '%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verifies the following clone root attrs: quota, data_pool and pool_namespace.
        # remaining attributes of the clone root are validated in _verify_clone_attrs.

        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        # pass in subvol_path (the subvolume path when the snapshot was taken) when the
        # subvolume is removed but its snapshots are retained for clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)

    def _generate_random_volume_name(self, count=1):
        n = self.volume_start
        volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.volume_start += count
        return volumes[0] if count == 1 else volumes

    def _generate_random_subvolume_name(self, count=1):
        n = self.subvolume_start
        subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.subvolume_start += count
        return subvolumes[0] if count == 1 else subvolumes

    def _generate_random_group_name(self, count=1):
        n = self.group_start
        groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.group_start += count
        return groups[0] if count == 1 else groups

    def _generate_random_snapshot_name(self, count=1):
        n = self.snapshot_start
        snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.snapshot_start += count
        return snaps[0] if count == 1 else snaps

    def _generate_random_clone_name(self, count=1):
        n = self.clone_start
        clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.clone_start += count
        return clones[0] if count == 1 else clones

    def _enable_multi_fs(self):
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_volume_info(self, vol_name):
        args = ["volume", "info", vol_name]
        args = tuple(args)
        vol_md = self._fs_cmd(*args)
        return vol_md

    def _get_subvolume_group_path(self, vol_name, group_name):
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/' and trailing whitespace
        return path[1:].rstrip()

    def _get_subvolume_group_info(self, vol_name, group_name):
        args = ["subvolumegroup", "info", vol_name, group_name]
        args = tuple(args)
        group_md = self._fs_cmd(*args)
        return group_md

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/' and trailing whitespace
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md

    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)

        # ownership
        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)

    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)

    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership will be changed below
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)

    def _wait_for_trash_empty(self, timeout=60):
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)

    def _wait_for_subvol_trash_empty(self, subvol, group="_nogroup", timeout=30):
        trashdir = os.path.join("./", "volumes", group, subvol, ".trash")
        try:
            self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
        except CommandFailedError as ce:
            # a missing trash dir counts as empty; propagate anything else
            if ce.exitstatus == errno.ENOENT:
                pass
            else:
                raise

    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))
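
    # For example (following the hashing above): a legacy subvolume whose
    # canonical path is "/volumes/_nogroup/sv1" (a hypothetical name) keeps
    # its metadata at volumes/_legacy/<hex md5 of "/volumes/_nogroup/sv1">.meta,
    # whereas v1/v2 subvolumes keep a ".meta" file inside their own directory.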

    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath

    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
        else:
            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)

    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
        key = {key}

        """.format(authid=authid, key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())

    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data

    def setUp(self):
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()


class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That the volume can be created and then cleaned up.
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))
        else:
            # clean up
            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")

    def test_volume_ls(self):
        """
        That the existing and newly created volumes can be listed and are
        finally cleaned up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        # create new volumes and add them to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")

    def test_volume_rm(self):
        """
        That the volume can only be removed when --yes-i-really-mean-it is used
        and verify that the deleted volume is not listed anymore.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                # check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if self.volname in [volume['name'] for volume in volumes]:
                    raise RuntimeError("expected the 'fs volume rm' command to succeed; "
                                       "volume {0} was not removed".format(self.volname))
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")

    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That an arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        # check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        # check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))

    def test_volume_rename(self):
        """
        That a volume, its file system and its pools can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_fails_without_confirmation_flag(self):
        """
        That renaming a volume fails without the --yes-i-really-mean-it flag.
        """
        newvolname = self._generate_random_volume_name()
        try:
            self._fs_cmd("volume", "rename", self.volname, newvolname)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "invalid error code on renaming a FS volume without the "
                             "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of FS volume to fail without the "
                      "'--yes-i-really-mean-it' flag")

    def test_volume_rename_for_more_than_one_data_pool(self):
        """
        That renaming a volume with more than one data pool does not change
        the name of the data pools.
        """
        for m in self.mounts:
            m.umount_wait()
        self.fs.add_data_pool('another-data-pool')
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        self.fs.get_pool_names(refresh=True)
        orig_data_pool_names = list(self.fs.data_pools.values())
        new_metadata_pool = f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", self.volname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        # metadata pool name changed
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        # data pool names unchanged
        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))

    def test_volume_info(self):
        """
        Tests the 'fs volume info' command
        """
        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
        group = self._generate_random_group_name()
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertEqual(vol_info["used_size"], 0,
                         "Size should be zero when volumes directory is empty")

    def test_volume_info_without_subvolumegroup(self):
        """
        Tests the 'fs volume info' command without a subvolume group
        """
        vol_fields = ["pools", "mon_addrs"]
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertNotIn("used_size", vol_info,
                         "'used_size' should not be present in absence of subvolumegroup")
        self.assertNotIn("pending_subvolume_deletions", vol_info,
                         "'pending_subvolume_deletions' should not be present in absence"
                         " of subvolumegroup")


class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_nonexistent_subvolume_group_create(self):
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try creating a subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")

    def test_nonexistent_subvolume_group_rm(self):
        group = "non_existent_group"

        # try removing a nonexistent subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")

    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether group path is cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")

    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_mode(self):
        group1, group2 = self._generate_random_group_name(2)
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)

        group1_path = self._get_subvolume_group_path(self.volname, group1)
        group2_path = self._get_subvolume_group_path(self.volname, group2)
        volumes_path = os.path.dirname(group1_path)

        # check group's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c', '%a', group1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c', '%a', group2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c', '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode1)

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid, and
        that its uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c', '%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c', '%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)

    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")

    def test_subvolume_group_create_with_size(self):
        # create group with size -- should set quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        self.assertEqual(group_info["bytes_quota"], 1000000000)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_info(self):
        # tests the 'fs subvolumegroup info' command

        group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                    "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        for md in group_md:
            self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

        self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(group_info["uid"], 0)
        self.assertEqual(group_info["gid"], 0)

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))

        # get group metadata after quota set
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        for md in group_md:
            self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

        self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
        self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name -- should be idempotent
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_mode(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with mode -- should set mode
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766")

        group_path = self._get_subvolume_group_path(self.volname, group)

        # check subvolumegroup's mode
        mode = self.mount_a.run_shell(['stat', '-c', '%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(mode, "766")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_uid_gid(self):
        desired_uid = 1000
        desired_gid = 1000

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with uid/gid -- should set uid/gid
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid))

        group_path = self._get_subvolume_group_path(self.volname, group)

        # verify the uid and gid
        actual_uid = int(self.mount_a.run_shell(['stat', '-c', '%u', group_path]).stdout.getvalue().strip())
        actual_gid = int(self.mount_a.run_shell(['stat', '-c', '%g', group_path]).stdout.getvalue().strip())
        self.assertEqual(desired_uid, actual_uid)
        self.assertEqual(desired_gid, actual_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_data_pool(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        group_path = self._get_subvolume_group_path(self.volname, group)

        default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # try creating w/ same subvolume group name with new data pool -- should set pool
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool)
        desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_resize(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with size -- should set quota
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        self.assertEqual(group_info["bytes_quota"], 1000000000)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_quota_mds_path_restriction_to_group_path(self):
        """
        Tests subvolumegroup quota enforcement with the mds auth path restriction set to the group.
        For quota to be enforced, read permission needs to be provided to the parent
        of the directory on which the quota is set. Please see the tracker comment [1].
        [1] https://tracker.ceph.com/issues/55090#note-8
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # Create auth_id
        authid = "client.guest1"
        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", authid,
            "mds", "allow rw path=/volumes",
            "mgr", "allow rw",
            "osd", "allow rw tag cephfs *=*",
            "mon", "allow r",
            "--format=json-pretty"
        ))

        # Prepare guest_mount with new authid
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

        # mount the subvolume
        mount_path = os.path.join("/", subvolpath)
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # create 99 files of 1MB
        guest_mount.run_shell_payload("mkdir -p dir1")
        for i in range(99):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)
        try:
            # write two 1MB files to exceed the quota
            guest_mount.run_shell_payload("mkdir -p dir2")
            for i in range(2):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # For quota to be enforced
            time.sleep(60)
            # create 400 files of 1MB to exceed quota
            for i in range(400):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
                # Sometimes quota enforcement takes time.
                if i == 200:
                    time.sleep(60)
        except CommandFailedError:
            pass
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # clean up
        guest_mount.umount_wait()

        # Delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self):
        """
        Tests subvolumegroup quota enforcement with the mds auth path restriction set to the
        subvolume path. The quota should not be enforced because of the fourth limitation
        mentioned at https://docs.ceph.com/en/latest/cephfs/quota/#limitations
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        mount_path = os.path.join("/", subvolpath)

        # Create auth_id
        authid = "client.guest1"
        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", authid,
            "mds", f"allow rw path={mount_path}",
            "mgr", "allow rw",
            "osd", "allow rw tag cephfs *=*",
            "mon", "allow r",
            "--format=json-pretty"
        ))

        # Prepare guest_mount with new authid
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

        # mount the subvolume
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # create 99 files of 1MB, staying just under the group quota
        guest_mount.run_shell_payload("mkdir -p dir1")
        for i in range(99):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)
        try:
            # write two 1MB files to exceed the quota
            guest_mount.run_shell_payload("mkdir -p dir2")
            for i in range(2):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # For quota to be enforced
            time.sleep(60)
            # create 400 files of 1MB to exceed quota
            for i in range(400):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
                # Sometimes quota enforcement takes time.
                if i == 200:
                    time.sleep(60)
        except CommandFailedError:
            self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed")

        # clean up
        guest_mount.umount_wait()

        # Delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_exceeded_subvolume_removal(self):
        """
        Tests subvolume removal if its group quota is exceeded
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB, staying just under the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        try:
            # write two 1MB files to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400)
        except CommandFailedError:
            # Delete subvolume while group quota is exceeded
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self):
        """
        Tests retained snapshot subvolume removal if its group quota is exceeded
        """
        group = self._generate_random_group_name()
        subvolname = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB, staying just under the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group)

        try:
            # write two 1MB files to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed quota
            self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400)
        except CommandFailedError:
            # remove with snapshot retention
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots")
            # remove snapshot1
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group)
            # remove snapshot2 (should remove the retained subvolume)
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group)
            # verify subvolume trash is clean
            self._wait_for_subvol_trash_empty(subvolname, group=group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_subvolume_removal(self):
        """
        Tests subvolume removal if its group quota is set.
        """
        # create group with size -- should set quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_legacy_subvolume_removal(self):
        """
        Tests legacy subvolume removal if its group quota is set.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume -- in a custom group
        createpath1 = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()  # remove any trailing newline

        # and... the subvolume path returned should be what we created behind the scenes
        self.assertEqual(createpath1[1:], subvolpath1)

        # Set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_v1_subvolume_removal(self):
        """
        Tests v1 subvolume removal if its group quota is set.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in a custom group
        self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False)

        # Set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_resize_fail_invalid_size(self):
        """
        That a subvolume group cannot be resized to an invalid size and that the quota does not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create group with 1MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # try to resize the subvolume group with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                             "invalid error code on resize of subvolume group with invalid size")
        else:
            self.fail("expected the 'fs subvolumegroup resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_resize_fail_zero_size(self):
        """
        That a subvolume group cannot be resized to a zero size and that the quota does not change
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create group with 1MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # try to resize the subvolume group with size 0
        nsize = 0
        try:
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL,
                             "invalid error code on resize of subvolume group with invalid size")
        else:
            self.fail("expected the 'fs subvolumegroup resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_resize_quota_lt_used_size(self):
        """
        That a subvolume group can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create group with 20MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))

        # shrink the subvolume group
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        except CommandFailedError:
            self.fail("expected the 'fs subvolumegroup resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1421
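# Editor's sketch: write_n_mb() above comes from the mount object and fills a
# file with the given number of megabytes. Assuming a plain POSIX mount, an
# equivalent effect could be had with dd via run_shell; '_write_mb_with_dd'
# is a hypothetical helper added only for illustration.
def _write_mb_with_dd(self, path, size_mb):
    # create a size_mb megabyte file at 'path' using dd over the mount
    self.mount_a.run_shell(["dd", "if=/dev/zero", "of={0}".format(path),
                            "bs=1M", "count={0}".format(size_mb)])
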
1422 def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self):
1423 """
1424 That a subvolume group cannot be resized to a size smaller than the current used size
1425 when --no_shrink is given, and that the quota does not change.
1426 """
1427
1428 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
1429 # create group with 20MB quota
1430 group = self._generate_random_group_name()
1431 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1432 "--size", str(osize), "--mode=777")
1433
1434 # make sure it exists
1435 grouppath = self._get_subvolume_group_path(self.volname, group)
1436 self.assertNotEqual(grouppath, None)
1437
1438 # create subvolume under the group
1439 subvolname = self._generate_random_subvolume_name()
1440 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1441 "--group_name", group, "--mode=777")
1442
1443 # make sure it exists
1444 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1445 self.assertNotEqual(subvolpath, None)
1446
1447 # create one file of 10MB
1448 file_size=self.DEFAULT_FILE_SIZE*10
1449 number_of_files=1
1450 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
1451 number_of_files,
1452 file_size))
1453 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
1454 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
1455
1456 usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes"))
1457
1458 # shrink the subvolume group
1459 nsize = usedsize // 2
1460 try:
1461 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink")
1462 except CommandFailedError as ce:
1463 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used")
1464 else:
1465 self.fail("expected the 'fs subvolumegroup resize' command to fail")
1466
1467 # verify the quota did not change
1468 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1469 self.assertEqual(size, osize)
1470
1471 # remove subvolume and group
1472 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1473 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1474
1475 # verify trash dir is clean
1476 self._wait_for_trash_empty()
1477
1478 def test_subvolume_group_resize_expand_on_full_subvolume(self):
1479 """
1480 That a subvolume group can be expanded after it is full and that future writes succeed
1481 """
1482
1483 osize = self.DEFAULT_FILE_SIZE*1024*1024*100
1484 # create group with 100MB quota
1485 group = self._generate_random_group_name()
1486 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1487 "--size", str(osize), "--mode=777")
1488
1489 # make sure it exists
1490 grouppath = self._get_subvolume_group_path(self.volname, group)
1491 self.assertNotEqual(grouppath, None)
1492
1493 # create subvolume under the group
1494 subvolname = self._generate_random_subvolume_name()
1495 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1496 "--group_name", group, "--mode=777")
1497
1498 # make sure it exists
1499 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1500 self.assertNotEqual(subvolpath, None)
1501
1502 # create 99 files of 1MB
1503 self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)
1504
1505 try:
1506 # write two 1MB files to exceed the quota
1507 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
1508 # wait for the quota to be enforced
1509 time.sleep(20)
1510 # create 500 files of 1MB
1511 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1512 except CommandFailedError:
1513 # writes failed; expand the subvolume group further and try writing again
1514 nsize = osize*7
1515 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1516 try:
1517 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1518 except CommandFailedError:
1519 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1520 "to succeed".format(subvolname))
1521 else:
1522 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1523 "to fail".format(subvolname))
1524
1525 # remove subvolume and group
1526 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1527 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1528
1529 # verify trash dir is clean
1530 self._wait_for_trash_empty()
1531
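# Editor's sketch: the fixed time.sleep(20) above gives clients time to learn
# the new quota. A polling alternative is outlined below; it is a hypothetical
# helper (probe writes consume real space) and not part of the upstream suite.
def _wait_for_quota_enforcement(self, subvolname, group, timo=60):
    # retry a small write until it is rejected, or give up after 'timo' seconds
    for _ in range(timo):
        try:
            self._do_subvolume_io(subvolname, subvolume_group=group,
                                  create_dir='quota_probe', number_of_files=1)
        except CommandFailedError:
            return  # the quota is now being enforced
        time.sleep(1)
    self.fail("quota was not enforced within {0} seconds".format(timo))
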
1532 def test_subvolume_group_resize_infinite_size(self):
1533 """
1534 That a subvolume group can be resized to an infinite size by unsetting its quota.
1535 """
1536
1537 osize = self.DEFAULT_FILE_SIZE*1024*1024
1538 # create group
1539 group = self._generate_random_group_name()
1540 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1541 "--size", str(osize))
1542
1543 # make sure it exists
1544 grouppath = self._get_subvolume_group_path(self.volname, group)
1545 self.assertNotEqual(grouppath, None)
1546
1547 # resize inf
1548 self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
1549
1550 # verify that the quota is None
1551 size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
1552 self.assertEqual(size, None)
1553
1554 # remove subvolume group
1555 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1556
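# Editor's note: resizing to "inf" clears the quota, which is why getfattr
# returns None above. Assuming standard CephFS quota semantics, where a
# ceph.quota.max_bytes value of 0 means "no quota", roughly the same effect
# could be had directly on the directory:
#
#     self.mount_a.setfattr(grouppath, 'ceph.quota.max_bytes', '0', sudo=True)
#
# The test exercises the CLI path since that is the supported interface.
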
1557 def test_subvolume_group_resize_infinite_size_future_writes(self):
1558 """
1559 That a subvolume group can be resized to an infinite size and that future writes succeed.
1560 """
1561
1562 osize = self.DEFAULT_FILE_SIZE*1024*1024*5
1563 # create group with 5MB quota
1564 group = self._generate_random_group_name()
1565 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1566 "--size", str(osize), "--mode=777")
1567
1568 # make sure it exists
1569 grouppath = self._get_subvolume_group_path(self.volname, group)
1570 self.assertNotEqual(grouppath, None)
1571
1572 # create subvolume under the group
1573 subvolname = self._generate_random_subvolume_name()
1574 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1575 "--group_name", group, "--mode=777")
1576
1577 # make sure it exists
1578 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1579 self.assertNotEqual(subvolpath, None)
1580
1581 # create 4 files of 1MB
1582 self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4)
1583
1584 try:
1585 # write two 1MB files to exceed the quota
1586 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
1587 # wait for the quota to be enforced
1588 time.sleep(20)
1589 # create 500 files of 1MB
1590 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1591 except CommandFailedError:
1592 # writes failed; resize the subvolume group to 'inf' and try writing again
1593 # resize inf
1594 self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
1595 try:
1596 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1597 except CommandFailedError:
1598 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1599 "to succeed".format(subvolname))
1600 else:
1601 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1602 "to fail".format(subvolname))
1603
1605 # verify that the quota is None
1606 size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
1607 self.assertEqual(size, None)
1608
1609 # remove subvolume and group
1610 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1611 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1612
1613 # verify trash dir is clean
1614 self._wait_for_trash_empty()
1615
1616 def test_subvolume_group_ls(self):
1617 # tests the 'fs subvolumegroup ls' command
1618
1621 # create subvolume groups
1622 subvolumegroups = self._generate_random_group_name(3)
1623 for groupname in subvolumegroups:
1624 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1625
1626 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1627 if len(subvolumegroupls) == 0:
1628 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
1629 else:
1630 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1631 if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
1632 raise RuntimeError("Error creating or listing subvolume groups")
1633
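# Editor's note: Counter equality (rather than set equality) is used above so
# that duplicate names are also detected. A minimal illustration:
#
#     collections.Counter(['a', 'a', 'b']) == collections.Counter(['a', 'b'])  # False
#     set(['a', 'a', 'b']) == set(['a', 'b'])                                  # True
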
1634 def test_subvolume_group_ls_filter(self):
1635 # tests that the 'fs subvolumegroup ls' command filters out the '_deleting' directory
1636
1639 # create subvolume groups
1640 subvolumegroups = self._generate_random_group_name(3)
1641 for groupname in subvolumegroups:
1642 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1643
1644 # create subvolume and remove. This creates '_deleting' directory.
1645 subvolume = self._generate_random_subvolume_name()
1646 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1647 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1648
1649 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1650 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1651 if "_deleting" in subvolgroupnames:
1652 self.fail("'fs subvolumegroup ls' listed the internal '_deleting' directory")
1653
1654 def test_subvolume_group_ls_filter_internal_directories(self):
1655 # tests that the 'fs subvolumegroup ls' command filters out internal directories
1656 # e.g. '_deleting', '_nogroup', '_index', '_legacy'
1657
1658 subvolumegroups = self._generate_random_group_name(3)
1659 subvolume = self._generate_random_subvolume_name()
1660 snapshot = self._generate_random_snapshot_name()
1661 clone = self._generate_random_clone_name()
1662
1663 #create subvolumegroups
1664 for groupname in subvolumegroups:
1665 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1666
1667 # create subvolume which will create '_nogroup' directory
1668 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1669
1670 # create snapshot
1671 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1672
1673 # clone snapshot which will create '_index' directory
1674 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
1675
1676 # remove snapshot
1677 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1678
1679 # remove subvolume which will create '_deleting' directory
1680 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1681
1682 # list subvolumegroups
1683 ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1684 self.assertEqual(len(ret), len(subvolumegroups))
1685
1686 ret_list = [subvolumegroup['name'] for subvolumegroup in ret]
1687 self.assertEqual(len(ret_list), len(subvolumegroups))
1688
1689 self.assertTrue(all(elem in subvolumegroups for elem in ret_list))
1690
1691 def test_subvolume_group_ls_for_nonexistent_volume(self):
1692 # tests the 'fs subvolumegroup ls' command when no subvolume group exists
1693 # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created
1694
1695 # list subvolume groups
1696 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1697 if len(subvolumegroupls) > 0:
1698 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
1699
1700 def test_subvolumegroup_pin_distributed(self):
1701 self.fs.set_max_mds(2)
1702 status = self.fs.wait_for_daemons()
1703 self.config_set('mds', 'mds_export_ephemeral_distributed', True)
1704
1705 group = "pinme"
1706 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1707 self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
1708 subvolumes = self._generate_random_subvolume_name(50)
1709 for subvolume in subvolumes:
1710 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1711 self._wait_distributed_subtrees(2 * 2, status=status, rank="all")
1712
1713 # remove subvolumes
1714 for subvolume in subvolumes:
1715 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1716
1717 # verify trash dir is clean
1718 self._wait_for_trash_empty()
1719
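# Editor's note: 'subvolumegroup pin ... distributed True' is expected to map
# to the ceph.dir.pin.distributed vxattr on the group directory, letting the
# MDS spread child subvolumes across active ranks. Assuming a client with
# suitable caps, the manual equivalent would be roughly:
#
#     setfattr -n ceph.dir.pin.distributed -v 1 <group path>
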
1720 def test_subvolume_group_rm_force(self):
1721 # test removing non-existing subvolume group with --force
1722 group = self._generate_random_group_name()
1723 try:
1724 self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
1725 except CommandFailedError:
1726 raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
1727
1728 def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self):
1729 """Test the presence of any subvolumegroup when only subvolumegroup is present"""
1730
1731 group = self._generate_random_group_name()
1732 # create subvolumegroup
1733 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1734 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1735 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1736 # delete subvolumegroup
1737 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1738 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1739 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1740
1741 def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self):
1742 """Test the presence of any subvolumegroup when no subvolumegroup is present"""
1743
1744 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1745 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1746
1747 def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
1748 """Test the presence of any subvolume when subvolumegroup
1749 and subvolume both are present"""
1750
1751 group = self._generate_random_group_name()
1752 subvolume = self._generate_random_subvolume_name(2)
1753 # create subvolumegroup
1754 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1755 # create subvolume in group
1756 self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
1757 # create subvolume
1758 self._fs_cmd("subvolume", "create", self.volname, subvolume[1])
1759 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1760 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1761 # delete subvolume in group
1762 self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
1763 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1764 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1765 # delete subvolume
1766 self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
1767 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1768 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1769 # delete subvolumegroup
1770 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1771 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1772 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1773
1774 def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self):
1775 """Test the presence of any subvolume when subvolume is present
1776 but no subvolumegroup is present"""
1777
1778 subvolume = self._generate_random_subvolume_name()
1779 # create subvolume
1780 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1781 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1782 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1783 # delete subvolume
1784 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1785 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1786 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1787
1788
1789 class TestSubvolumes(TestVolumesHelper):
1790 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
1791 def test_async_subvolume_rm(self):
1792 subvolumes = self._generate_random_subvolume_name(100)
1793
1794 # create subvolumes
1795 for subvolume in subvolumes:
1796 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
1797 self._do_subvolume_io(subvolume, number_of_files=10)
1798
1799 self.mount_a.umount_wait()
1800
1801 # remove subvolumes
1802 for subvolume in subvolumes:
1803 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1804
1805 self.mount_a.mount_wait()
1806
1807 # verify trash dir is clean
1808 self._wait_for_trash_empty(timeout=300)
1809
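# Editor's note: 'subvolume rm' is asynchronous -- the subvolume is moved into
# the volume's trash and purged in the background, hence the wait on the trash
# rather than on the rm calls. A hedged sketch of peeking at the trash (the
# 'volumes/_deleting' path is inferred from this suite's conventions):
#
#     pending = self.mount_a.ls("volumes/_deleting")
#     log.debug("entries awaiting purge: {0}".format(pending))
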
1810 def test_default_uid_gid_subvolume(self):
1811 subvolume = self._generate_random_subvolume_name()
1812 expected_uid = 0
1813 expected_gid = 0
1814
1815 # create subvolume
1816 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1817 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1818
1819 # check subvolume's uid and gid
1820 stat = self.mount_a.stat(subvol_path)
1821 self.assertEqual(stat['st_uid'], expected_uid)
1822 self.assertEqual(stat['st_gid'], expected_gid)
1823
1824 # remove subvolume
1825 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1826
1827 # verify trash dir is clean
1828 self._wait_for_trash_empty()
1829
1830 def test_nonexistent_subvolume_rm(self):
1831 # remove non-existing subvolume
1832 subvolume = "non_existent_subvolume"
1833
1834 # try to remove the non-existent subvolume
1835 try:
1836 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1837 except CommandFailedError as ce:
1838 if ce.exitstatus != errno.ENOENT:
1839 raise
1840 else:
1841 raise RuntimeError("expected the 'fs subvolume rm' command to fail")
1842
1843 def test_subvolume_create_and_rm(self):
1844 # create subvolume
1845 subvolume = self._generate_random_subvolume_name()
1846 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1847
1848 # make sure it exists
1849 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1850 self.assertNotEqual(subvolpath, None)
1851
1852 # remove subvolume
1853 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1854 # make sure it's gone
1855 try:
1856 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1857 except CommandFailedError as ce:
1858 if ce.exitstatus != errno.ENOENT:
1859 raise
1860 else:
1861 raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")
1862
1863 # verify trash dir is clean
1864 self._wait_for_trash_empty()
1865
1866 def test_subvolume_create_and_rm_in_group(self):
1867 subvolume = self._generate_random_subvolume_name()
1868 group = self._generate_random_group_name()
1869
1870 # create group
1871 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1872
1873 # create subvolume in group
1874 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1875
1876 # remove subvolume
1877 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1878
1879 # verify trash dir is clean
1880 self._wait_for_trash_empty()
1881
1882 # remove group
1883 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1884
1885 def test_subvolume_create_idempotence(self):
1886 # create subvolume
1887 subvolume = self._generate_random_subvolume_name()
1888 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1889
1890 # try creating w/ same subvolume name -- should be idempotent
1891 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1892
1893 # remove subvolume
1894 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1895
1896 # verify trash dir is clean
1897 self._wait_for_trash_empty()
1898
1899 def test_subvolume_create_idempotence_resize(self):
1900 # create subvolume
1901 subvolume = self._generate_random_subvolume_name()
1902 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1903
1904 # try creating w/ same subvolume name with size -- should set quota
1905 self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")
1906
1907 # get subvolume metadata
1908 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1909 self.assertEqual(subvol_info["bytes_quota"], 1000000000)
1910
1911 # remove subvolume
1912 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1913
1914 # verify trash dir is clean
1915 self._wait_for_trash_empty()
1916
1917 def test_subvolume_create_idempotence_mode(self):
1918 # default mode
1919 default_mode = "755"
1920
1921 # create subvolume
1922 subvolume = self._generate_random_subvolume_name()
1923 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1924
1925 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1926
1927 actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
1928 self.assertEqual(actual_mode_1, default_mode)
1929
1930 # try creating w/ same subvolume name with --mode 777
1931 new_mode = "777"
1932 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)
1933
1934 actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
1935 self.assertEqual(actual_mode_2, new_mode)
1936
1937 # remove subvolume
1938 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1939
1940 # verify trash dir is clean
1941 self._wait_for_trash_empty()
1942
1943 def test_subvolume_create_idempotence_without_passing_mode(self):
1944 # create subvolume
1945 desired_mode = "777"
1946 subvolume = self._generate_random_subvolume_name()
1947 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)
1948
1949 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1950
1951 actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
1952 self.assertEqual(actual_mode_1, desired_mode)
1953
1954 # default mode
1955 default_mode = "755"
1956
1957 # try creating w/ same subvolume name without passing --mode argument
1958 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1959
1960 actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
1961 self.assertEqual(actual_mode_2, default_mode)
1962
1963 # remove subvolume
1964 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1965
1966 # verify trash dir is clean
1967 self._wait_for_trash_empty()
1968
1969 def test_subvolume_create_isolated_namespace(self):
1970 """
1971 Create subvolume in separate rados namespace
1972 """
1973
1974 # create subvolume
1975 subvolume = self._generate_random_subvolume_name()
1976 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")
1977
1978 # get subvolume metadata
1979 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1980 self.assertNotEqual(len(subvol_info), 0)
1981 self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)
1982
1983 # remove subvolume
1984 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1985
1986 # verify trash dir is clean
1987 self._wait_for_trash_empty()
1988
1989 def test_subvolume_create_with_auto_cleanup_on_fail(self):
1990 subvolume = self._generate_random_subvolume_name()
1991 data_pool = "invalid_pool"
1992 # create subvolume with invalid data pool layout fails
1993 with self.assertRaises(CommandFailedError):
1994 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
1995
1996 # check whether subvol path is cleaned up
1997 try:
1998 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1999 except CommandFailedError as ce:
2000 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
2001 else:
2002 self.fail("expected the 'fs subvolume getpath' command to fail")
2003
2004 # verify trash dir is clean
2005 self._wait_for_trash_empty()
2006
2007 def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
2008 subvol1, subvol2 = self._generate_random_subvolume_name(2)
2009 group = self._generate_random_group_name()
2010
2011 # create group. This also helps set the default pool layout for subvolumes
2012 # created within the group.
2013 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2014
2015 # create subvolume in group.
2016 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
2017 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
2018
2019 default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
2020 new_pool = "new_pool"
2021 self.assertNotEqual(default_pool, new_pool)
2022
2023 # add data pool
2024 newid = self.fs.add_data_pool(new_pool)
2025
2026 # create subvolume specifying the new data pool as its pool layout
2027 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
2028 "--pool_layout", new_pool)
2029 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
2030
2031 desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
2032 try:
2033 self.assertEqual(desired_pool, new_pool)
2034 except AssertionError:
2035 self.assertEqual(int(desired_pool), newid) # old kernel returns id
2036
2037 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
2038 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
2039 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2040
2041 # verify trash dir is clean
2042 self._wait_for_trash_empty()
2043
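# Editor's note: '--pool_layout' is reflected in the ceph.dir.layout.pool
# vxattr checked above. Assuming the pool is already attached to the
# filesystem, the manual equivalent would be roughly:
#
#     setfattr -n ceph.dir.layout.pool -v new_pool <subvol2 path>
#
# New files inherit the directory layout; already-written data does not move.
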
2044 def test_subvolume_create_with_desired_mode(self):
2045 subvol1 = self._generate_random_subvolume_name()
2046
2047 # default mode
2048 default_mode = "755"
2049 # desired mode
2050 desired_mode = "777"
2051
2052 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")
2053
2054 subvol1_path = self._get_subvolume_path(self.volname, subvol1)
2055
2056 # check subvolumegroup's mode
2057 subvol_par_path = os.path.dirname(subvol1_path)
2058 group_path = os.path.dirname(subvol_par_path)
2059 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
2060 self.assertEqual(actual_mode1, default_mode)
2061 # check /volumes mode
2062 volumes_path = os.path.dirname(group_path)
2063 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
2064 self.assertEqual(actual_mode2, default_mode)
2065 # check subvolume's mode
2066 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
2067 self.assertEqual(actual_mode3, desired_mode)
2068
2069 self._fs_cmd("subvolume", "rm", self.volname, subvol1)
2070
2071 # verify trash dir is clean
2072 self._wait_for_trash_empty()
2073
2074 def test_subvolume_create_with_desired_mode_in_group(self):
2075 subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
2076
2077 group = self._generate_random_group_name()
2078 # default mode
2079 expected_mode1 = "755"
2080 # desired mode
2081 expected_mode2 = "777"
2082
2083 # create group
2084 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2085
2086 # create subvolume in group
2087 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
2088 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
2089 # check whether mode 0777 also works
2090 self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")
2091
2092 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
2093 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
2094 subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)
2095
2096 # check subvolume's mode
2097 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
2098 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
2099 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
2100 self.assertEqual(actual_mode1, expected_mode1)
2101 self.assertEqual(actual_mode2, expected_mode2)
2102 self.assertEqual(actual_mode3, expected_mode2)
2103
2104 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
2105 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
2106 self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
2107 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2108
2109 # verify trash dir is clean
2110 self._wait_for_trash_empty()
2111
2112 def test_subvolume_create_with_desired_uid_gid(self):
2113 """
2114 That the subvolume can be created with the desired uid and gid, and that its uid and gid match the
2115 expected values.
2116 """
2117 uid = 1000
2118 gid = 1000
2119
2120 # create subvolume
2121 subvolname = self._generate_random_subvolume_name()
2122 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))
2123
2124 # make sure it exists
2125 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2126 self.assertNotEqual(subvolpath, None)
2127
2128 # verify the uid and gid
2129 suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
2130 sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
2131 self.assertEqual(uid, suid)
2132 self.assertEqual(gid, sgid)
2133
2134 # remove subvolume
2135 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2136
2137 # verify trash dir is clean
2138 self._wait_for_trash_empty()
2139
2140 def test_subvolume_create_with_invalid_data_pool_layout(self):
2141 subvolume = self._generate_random_subvolume_name()
2142 data_pool = "invalid_pool"
2143 # create subvolume with invalid data pool layout
2144 try:
2145 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
2146 except CommandFailedError as ce:
2147 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
2148 else:
2149 self.fail("expected the 'fs subvolume create' command to fail")
2150
2151 # verify trash dir is clean
2152 self._wait_for_trash_empty()
2153
2154 def test_subvolume_create_with_invalid_size(self):
2155 # create subvolume with an invalid size -1
2156 subvolume = self._generate_random_subvolume_name()
2157 try:
2158 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
2159 except CommandFailedError as ce:
2160 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
2161 else:
2162 self.fail("expected the 'fs subvolume create' command to fail")
2163
2164 # verify trash dir is clean
2165 self._wait_for_trash_empty()
2166
2167 def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
2168 """
2169 That 'subvolume create' and 'subvolume ls' throw a permission
2170 denied error if the option --group_name=_nogroup is provided.
2171 """
2172
2173 subvolname = self._generate_random_subvolume_name()
2174
2175 # try to create subvolume providing --group_name=_nogroup option
2176 try:
2177 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
2178 except CommandFailedError as ce:
2179 self.assertEqual(ce.exitstatus, errno.EPERM)
2180 else:
2181 self.fail("expected the 'fs subvolume create' command to fail")
2182
2183 # create subvolume
2184 self._fs_cmd("subvolume", "create", self.volname, subvolname)
2185
2186 # try to list subvolumes providing --group_name=_nogroup option
2187 try:
2188 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
2189 except CommandFailedError as ce:
2190 self.assertEqual(ce.exitstatus, errno.EPERM)
2191 else:
2192 self.fail("expected the 'fs subvolume ls' command to fail")
2193
2194 # list subvolumes
2195 self._fs_cmd("subvolume", "ls", self.volname)
2196
2197 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2198
2199 # verify trash dir is clean.
2200 self._wait_for_trash_empty()
2201
2202 def test_subvolume_expand(self):
2203 """
2204 That a subvolume can be expanded in size and that its quota matches the expected size.
2205 """
2206
2207 # create subvolume
2208 subvolname = self._generate_random_subvolume_name()
2209 osize = self.DEFAULT_FILE_SIZE*1024*1024
2210 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
2211
2212 # make sure it exists
2213 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2214 self.assertNotEqual(subvolpath, None)
2215
2216 # expand the subvolume
2217 nsize = osize*2
2218 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2219
2220 # verify the quota
2221 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2222 self.assertEqual(size, nsize)
2223
2224 # remove subvolume
2225 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2226
2227 # verify trash dir is clean
2228 self._wait_for_trash_empty()
2229
2230 def test_subvolume_info(self):
2231 # tests the 'fs subvolume info' command
2232
2233 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
2234 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
2235 "type", "uid", "features", "state"]
2236
2237 # create subvolume
2238 subvolume = self._generate_random_subvolume_name()
2239 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2240
2241 # get subvolume metadata
2242 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2243 for md in subvol_md:
2244 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
2245
2246 self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
2247 self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
2248 self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
2249 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
2250
2251 self.assertEqual(len(subvol_info["features"]), 3,
2252 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
2253 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
2254 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
2255
2256 nsize = self.DEFAULT_FILE_SIZE*1024*1024
2257 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
2258
2259 # get subvolume metadata after quota set
2260 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2261 for md in subvol_md:
2262 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
2263
2264 self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
2265 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
2266 self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
2267 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
2268
2269 self.assertEqual(len(subvol_info["features"]), 3,
2270 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
2271 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
2272 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
2273
2274 # remove subvolume
2275 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2276
2277 # verify trash dir is clean
2278 self._wait_for_trash_empty()
2279
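# Editor's note: an abridged, illustrative shape of 'fs subvolume info' output
# (keys are the ones asserted above; values are placeholders, not captured
# from a real run):
#
#     {
#         "bytes_pcent": "undefined",
#         "bytes_quota": "infinite",
#         "data_pool": "cephfs_data",
#         "features": ["snapshot-clone", "snapshot-autoprotect", "snapshot-retention"],
#         "path": "/volumes/_nogroup/<subvolume>/<uuid>",
#         "state": "complete",
#         "type": "subvolume"
#     }
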
2280 def test_subvolume_ls(self):
2281 # tests the 'fs subvolume ls' command
2282
2285 # create subvolumes
2286 subvolumes = self._generate_random_subvolume_name(3)
2287 for subvolume in subvolumes:
2288 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2289
2290 # list subvolumes
2291 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2292 if len(subvolumels) == 0:
2293 self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
2294 else:
2295 subvolnames = [subvolume['name'] for subvolume in subvolumels]
2296 if collections.Counter(subvolnames) != collections.Counter(subvolumes):
2297 self.fail("Error creating or listing subvolumes")
2298
2299 # remove subvolume
2300 for subvolume in subvolumes:
2301 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2302
2303 # verify trash dir is clean
2304 self._wait_for_trash_empty()
2305
2306 def test_subvolume_ls_with_groupname_as_internal_directory(self):
2307 # tests the 'fs subvolume ls' command when the group name given is an internal directory
2308 # e.g. '_nogroup', '_legacy', '_deleting', '_index'.
2309 # 'fs subvolume ls' is expected to fail with errno EINVAL for '_legacy', '_deleting', '_index',
2310 # and with errno EPERM for '_nogroup'
2311
2312 # try to list subvolumes providing --group_name=_nogroup option
2313 try:
2314 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
2315 except CommandFailedError as ce:
2316 self.assertEqual(ce.exitstatus, errno.EPERM)
2317 else:
2318 self.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup")
2319
2320 # try to list subvolumes providing --group_name=_legacy option
2321 try:
2322 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_legacy")
2323 except CommandFailedError as ce:
2324 self.assertEqual(ce.exitstatus, errno.EINVAL)
2325 else:
2326 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy")
2327
2328 # try to list subvolumes providing --group_name=_deleting option
2329 try:
2330 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_deleting")
2331 except CommandFailedError as ce:
2332 self.assertEqual(ce.exitstatus, errno.EINVAL)
2333 else:
2334 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting")
2335
2336 # try to list subvolumes providing --group_name=_index option
2337 try:
2338 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_index")
2339 except CommandFailedError as ce:
2340 self.assertEqual(ce.exitstatus, errno.EINVAL)
2341 else:
2342 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _index")
2343
2344 def test_subvolume_ls_for_notexistent_default_group(self):
2345 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
2346 # prerequisite: we expect that the volume is created and the default group _nogroup is
2347 # NOT created (i.e. a subvolume without group is not created)
2348
2349 # list subvolumes
2350 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2351 if len(subvolumels) > 0:
2352 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
2353
2354 def test_subvolume_marked(self):
2355 """
2356 ensure a subvolume is marked with the ceph.dir.subvolume xattr
2357 """
2358 subvolume = self._generate_random_subvolume_name()
2359
2360 # create subvolume
2361 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2362
2363 # getpath
2364 subvolpath = self._get_subvolume_path(self.volname, subvolume)
2365
2366 # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
2367 # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
2368 # outside the subvolume
2369 dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
2370 srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
2371 rename_script = dedent("""
2372 import os
2373 import errno
2374 try:
2375 os.rename("{src}", "{dst}")
2376 except OSError as e:
2377 if e.errno != errno.EXDEV:
2378 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
2379 else:
2380 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
2381 """)
2382 self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)
2383
2384 # remove subvolume
2385 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2386
2387 # verify trash dir is clean
2388 self._wait_for_trash_empty()
2389
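# Editor's sketch: the marker could also be read back directly; assuming the
# vxattr is exposed to the client and reads as "1", something like:
#
#     self.assertEqual(self.mount_a.getfattr(subvolpath, "ceph.dir.subvolume"), "1")
#
# The rename-based check above is preferred because it exercises the actual
# EXDEV enforcement rather than just the flag.
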
2390 def test_subvolume_pin_export(self):
2391 self.fs.set_max_mds(2)
2392 status = self.fs.wait_for_daemons()
2393
2394 subvolume = self._generate_random_subvolume_name()
2395 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2396 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
2397 path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2398 path = os.path.dirname(path) # get subvolume path
2399
2400 self._get_subtrees(status=status, rank=1)
2401 self._wait_subtrees([(path, 1)], status=status)
2402
2403 # remove subvolume
2404 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2405
2406 # verify trash dir is clean
2407 self._wait_for_trash_empty()
2408
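# Editor's note: 'subvolume pin ... export 1' is expected to set the
# ceph.dir.pin vxattr, pinning the subtree to MDS rank 1. Assuming client
# caps allow it, the manual form would be roughly:
#
#     setfattr -n ceph.dir.pin -v 1 <subvolume path>
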
2409 ### authorize operations
2410
2411 def test_authorize_deauthorize_legacy_subvolume(self):
2412 subvolume = self._generate_random_subvolume_name()
2413 group = self._generate_random_group_name()
2414 authid = "alice"
2415
2416 guest_mount = self.mount_b
2417 guest_mount.umount_wait()
2418
2419 # emulate an old-fashioned subvolume in a custom group
2420 createpath = os.path.join(".", "volumes", group, subvolume)
2421 self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
2422
2423 # add required xattrs to subvolume
2424 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
2425 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
2426
2427 mount_path = os.path.join("/", "volumes", group, subvolume)
2428
2429 # authorize guest authID read-write access to subvolume
2430 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2431 "--group_name", group, "--tenant_id", "tenant_id")
2432
2433 # guest authID should exist
2434 existing_ids = [a['entity'] for a in self.auth_list()]
2435 self.assertIn("client.{0}".format(authid), existing_ids)
2436
2437 # configure credentials for guest client
2438 self._configure_guest_auth(guest_mount, authid, key)
2439
2440 # mount the subvolume, and write to it
2441 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2442 guest_mount.write_n_mb("data.bin", 1)
2443
2444 # authorize guest authID read access to subvolume
2445 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2446 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
2447
2448 # guest client sees the change in access level to read only after a
2449 # remount of the subvolume.
2450 guest_mount.umount_wait()
2451 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2452
2453 # read existing content of the subvolume
2454 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
2455 # cannot write into read-only subvolume
2456 with self.assertRaises(CommandFailedError):
2457 guest_mount.write_n_mb("rogue.bin", 1)
2458
2459 # cleanup
2460 guest_mount.umount_wait()
2461 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
2462 "--group_name", group)
2463 # guest authID should no longer exist
2464 existing_ids = [a['entity'] for a in self.auth_list()]
2465 self.assertNotIn("client.{0}".format(authid), existing_ids)
2466 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2467 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2468
2469 def test_authorize_deauthorize_subvolume(self):
2470 subvolume = self._generate_random_subvolume_name()
2471 group = self._generate_random_group_name()
2472 authid = "alice"
2473
2474 guest_mount = self.mount_b
2475 guest_mount.umount_wait()
2476
2477 # create group
2478 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")
2479
2480 # create subvolume in group
2481 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2482 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
2483 "--group_name", group).rstrip()
2484
2485 # authorize guest authID read-write access to subvolume
2486 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2487 "--group_name", group, "--tenant_id", "tenant_id")
2488
2489 # guest authID should exist
2490 existing_ids = [a['entity'] for a in self.auth_list()]
2491 self.assertIn("client.{0}".format(authid), existing_ids)
2492
2493 # configure credentials for guest client
2494 self._configure_guest_auth(guest_mount, authid, key)
2495
2496 # mount the subvolume, and write to it
2497 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2498 guest_mount.write_n_mb("data.bin", 1)
2499
2500 # authorize guest authID read access to subvolume
2501 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2502 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
2503
2504 # guest client sees the change in access level to read only after a
2505 # remount of the subvolume.
2506 guest_mount.umount_wait()
2507 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2508
2509 # read existing content of the subvolume
2510 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
2511 # cannot write into read-only subvolume
2512 with self.assertRaises(CommandFailedError):
2513 guest_mount.write_n_mb("rogue.bin", 1)
2514
2515 # cleanup
2516 guest_mount.umount_wait()
2517 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
2518 "--group_name", group)
2519 # guest authID should no longer exist
2520 existing_ids = [a['entity'] for a in self.auth_list()]
2521 self.assertNotIn("client.{0}".format(authid), existing_ids)
2522 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2523 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2524
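# Editor's note: a representative (illustrative, not captured) shape of the
# caps 'subvolume authorize' creates for the guest, consistent with the
# read-write/read-only behaviour verified above:
#
#     client.alice
#         caps mds = "allow rw path=<subvolume path>"
#         caps mon = "allow r"
#         caps osd = "allow rw pool=<data pool> namespace=<pool namespace>"
#
# test_deauthorize_auth_id_after_out_of_band_update below asserts against the
# real output of 'ceph auth get'.
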
2525 def test_multitenant_subvolumes(self):
2526 """
2527 That subvolume access can be restricted to a tenant.
2528
2529 That metadata used to enforce tenant isolation of
2530 subvolumes is stored as a two-way mapping between auth
2531 IDs and subvolumes that they're authorized to access.
2532 """
2533 subvolume = self._generate_random_subvolume_name()
2534 group = self._generate_random_group_name()
2535
2536 guest_mount = self.mount_b
2537
2538 # Guest clients belonging to different tenants, but using the same
2539 # auth ID.
2540 auth_id = "alice"
2541 guestclient_1 = {
2542 "auth_id": auth_id,
2543 "tenant_id": "tenant1",
2544 }
2545 guestclient_2 = {
2546 "auth_id": auth_id,
2547 "tenant_id": "tenant2",
2548 }
2549
2550 # create group
2551 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2552
2553 # create subvolume in group
2554 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2555
2556 # Check that subvolume metadata file is created on subvolume creation.
2557 subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
2558 self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))
2559
2560 # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
2561 # 'tenant1', with 'rw' access to the volume.
2562 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2563 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2564
2565 # Check that auth metadata file for auth ID 'alice', is
2566 # created on authorizing 'alice' access to the subvolume.
2567 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2568 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2569
2570 # Verify that the auth metadata file stores the tenant ID that the
2571 # auth ID belongs to, the auth ID's authorized access levels
2572 # for different subvolumes, versioning details, etc.
2573 expected_auth_metadata = {
2574 "version": 5,
2575 "compat_version": 6,
2576 "dirty": False,
2577 "tenant_id": "tenant1",
2578 "subvolumes": {
2579 "{0}/{1}".format(group,subvolume): {
2580 "dirty": False,
2581 "access_level": "rw"
2582 }
2583 }
2584 }
2585
2586 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
2587 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
2588 del expected_auth_metadata["version"]
2589 del auth_metadata["version"]
2590 self.assertEqual(expected_auth_metadata, auth_metadata)
2591
2592 # Verify that the subvolume metadata file stores info about auth IDs
2593 # and their access levels to the subvolume, versioning details, etc.
2594 expected_subvol_metadata = {
2595 "version": 1,
2596 "compat_version": 1,
2597 "auths": {
2598 "alice": {
2599 "dirty": False,
2600 "access_level": "rw"
2601 }
2602 }
2603 }
2604 subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))
2605
2606 self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
2607 del expected_subvol_metadata["version"]
2608 del subvol_metadata["version"]
2609 self.assertEqual(expected_subvol_metadata, subvol_metadata)
2610
2611 # Cannot authorize 'guestclient_2' to access the volume.
2612 # It uses auth ID 'alice', which has already been used by
2613 # 'guestclient_1', belonging to another tenant, to access
2614 # the volume.
2615
2616 try:
2617 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
2618 "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
2619 except CommandFailedError as ce:
2620 self.assertEqual(ce.exitstatus, errno.EPERM,
2621 "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
2622 else:
2623 self.fail("expected the 'fs subvolume authorize' command to fail")
2624
2625 # Check that auth metadata file is cleaned up on removing
2626 # auth ID's only access to a volume.
2627
2628 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
2629 "--group_name", group)
2630 self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))
2631
2632 # Check that subvolume metadata file is cleaned up on subvolume deletion.
2633 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2634 self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))
2635
2636 # clean up
2637 guest_mount.umount_wait()
2638 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2639
2640 def test_subvolume_authorized_list(self):
2641 subvolume = self._generate_random_subvolume_name()
2642 group = self._generate_random_group_name()
2643 authid1 = "alice"
2644 authid2 = "guest1"
2645 authid3 = "guest2"
2646
2647 # create group
2648 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2649
2650 # create subvolume in group
2651 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2652
2653 # authorize alice authID read-write access to subvolume
2654 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
2655 "--group_name", group)
2656 # authorize guest1 authID read-write access to subvolume
2657 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
2658 "--group_name", group)
2659 # authorize guest2 authID read access to subvolume
2660 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
2661 "--group_name", group, "--access_level", "r")
2662
2663 # list authorized-ids of the subvolume
2664 expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
2665 auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
2666 self.assertCountEqual(expected_auth_list, auth_list)
2667
2668 # cleanup
2669 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
2670 "--group_name", group)
2671 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
2672 "--group_name", group)
2673 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
2674 "--group_name", group)
2675 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2676 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2677
2678 def test_authorize_auth_id_not_created_by_mgr_volumes(self):
2679 """
2680 If the auth_id already exists and was not created by the mgr plugin,
2681 authorizing that auth_id is not allowed by default.
2682 """
2683
2684 subvolume = self._generate_random_subvolume_name()
2685 group = self._generate_random_group_name()
2686
2687 # Create auth_id
2688 self.fs.mon_manager.raw_cluster_cmd(
2689 "auth", "get-or-create", "client.guest1",
2690 "mds", "allow *",
2691 "osd", "allow rw",
2692 "mon", "allow *"
2693 )
2694
2695 auth_id = "guest1"
2696 guestclient_1 = {
2697 "auth_id": auth_id,
2698 "tenant_id": "tenant1",
2699 }
2700
2701 # create group
2702 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2703
2704 # create subvolume in group
2705 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2706
2707 try:
2708 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2709 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2710 except CommandFailedError as ce:
2711 self.assertEqual(ce.exitstatus, errno.EPERM,
2712 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
2713 else:
2714 self.fail("expected the 'fs subvolume authorize' command to fail")
2715
2716 # clean up
2717 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2718 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2719 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2720
2721 def test_authorize_allow_existing_id_option(self):
2722 """
2723 If the auth_id already exists and was not created by mgr/volumes,
2724 authorizing it is not allowed by default, but is
2725 allowed with the option --allow-existing-id.
2726 """
2727
2728 subvolume = self._generate_random_subvolume_name()
2729 group = self._generate_random_group_name()
2730
2731 # Create auth_id
2732 self.fs.mon_manager.raw_cluster_cmd(
2733 "auth", "get-or-create", "client.guest1",
2734 "mds", "allow *",
2735 "osd", "allow rw",
2736 "mon", "allow *"
2737 )
2738
2739 auth_id = "guest1"
2740 guestclient_1 = {
2741 "auth_id": auth_id,
2742 "tenant_id": "tenant1",
2743 }
2744
2745 # create group
2746 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2747
2748 # create subvolume in group
2749 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2750
2751 # 'guestclient_1' uses an auth ID that already exists and was not created
2752 # by mgr/volumes, so authorizing it fails by default; it succeeds here
2753 # because the option '--allow-existing-id' is passed.
2754 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2755 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")
2756
2757 # clean up
2758 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
2759 "--group_name", group)
2760 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2761 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2762 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2763
2764 def test_deauthorize_auth_id_after_out_of_band_update(self):
2765 """
2766 If the auth_id authorized by the mgr/volumes plugin is updated
2767 out of band, the auth_id should not be deleted after a
2768 deauthorize; only the caps associated with it should be removed.
2769 """
2770
2771 subvolume = self._generate_random_subvolume_name()
2772 group = self._generate_random_group_name()
2773
2774 auth_id = "guest1"
2775 guestclient_1 = {
2776 "auth_id": auth_id,
2777 "tenant_id": "tenant1",
2778 }
2779
2780 # create group
2781 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2782
2783 # create subvolume in group
2784 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2785
2786 # Authorize 'guestclient_1' to access the subvolume.
2787 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2788 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2789
2790 subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
2791 "--group_name", group).rstrip()
2792
2793 # Update caps for guestclient_1 out of band
2794 out = self.fs.mon_manager.raw_cluster_cmd(
2795 "auth", "caps", "client.guest1",
2796 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
2797 "osd", "allow rw pool=cephfs_data",
2798 "mon", "allow r",
2799 "mgr", "allow *"
2800 )
2801
2802 # Deauthorize guestclient_1
2803 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
2804
2805 # Validate the caps of guestclient_1 after deauthorize. The auth ID should not have
2806 # been deleted, and the mgr and mds caps that were updated out of band should remain.
2807 out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
2808
2809 self.assertEqual("client.guest1", out[0]["entity"])
2810 self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
2811 self.assertEqual("allow *", out[0]["caps"]["mgr"])
2812 self.assertNotIn("osd", out[0]["caps"])
2813
2814 # clean up
2815 out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2816 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2817 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2818
2819 def test_recover_auth_metadata_during_authorize(self):
2820 """
2821 That the auth metadata manager can recover from partial auth updates using
2822 metadata files, which store auth info and its update status. This
2823 test validates the recovery during authorize.
2824 """
2825
2826 guest_mount = self.mount_b
2827
2828 subvolume = self._generate_random_subvolume_name()
2829 group = self._generate_random_group_name()
2830
2831 auth_id = "guest1"
2832 guestclient_1 = {
2833 "auth_id": auth_id,
2834 "tenant_id": "tenant1",
2835 }
2836
2837 # create group
2838 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2839
2840 # create subvolume in group
2841 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2842
2843 # Authorize 'guestclient_1' to access the subvolume.
2844 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2845 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2846
2847 # Check that the auth metadata file for auth ID 'guest1' is
2848 # created on authorizing 'guest1' access to the subvolume.
2849 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2850 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2851 expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2852
2853 # Induce partial auth update state by modifying the auth metadata file,
2854 # and then run authorize again.
2855 guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
2856
2857 # Authorize 'guestclient_1' to access the subvolume.
2858 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2859 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2860
2861 auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2862 self.assertEqual(auth_metadata_content, expected_auth_metadata_content)
2863
2864 # clean up
2865 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
2866 guest_mount.umount_wait()
2867 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2868 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2869 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2870
2871 def test_recover_auth_metadata_during_deauthorize(self):
2872 """
2873 That the auth metadata manager can recover from partial auth updates using
2874 metadata files, which store auth info and its update status. This
2875 test validates the recovery during deauthorize.
2876 """
2877
2878 guest_mount = self.mount_b
2879
2880 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
2881 group = self._generate_random_group_name()
2882
2883 guestclient_1 = {
2884 "auth_id": "guest1",
2885 "tenant_id": "tenant1",
2886 }
2887
2888 # create group
2889 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2890
2891 # create subvolumes in group
2892 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
2893 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
2894
2895 # Authorize 'guestclient_1' to access the subvolume1.
2896 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
2897 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2898
2899 # Check that the auth metadata file for auth ID 'guest1' is
2900 # created on authorizing 'guest1' access to subvolume1.
2901 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2902 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2903 expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2904
2905 # Authorize 'guestclient_1' to access the subvolume2.
2906 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
2907 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2908
2909 # Induce partial auth update state by modifying the auth metadata file,
2910 # and then run de-authorize.
2911 guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
2912
2913 # Deauthorize 'guestclient_1' to access the subvolume2.
2914 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
2915 "--group_name", group)
2916
2917 auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2918 self.assertEqual(auth_metadata_content, expected_auth_metadata_content)
2919
2920 # clean up
2921 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
2922 guest_mount.umount_wait()
2923 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2924 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
2925 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
2926 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2927
2928 def test_update_old_style_auth_metadata_to_new_during_authorize(self):
2929 """
2930 CephVolumeClient stores subvolume data in the auth metadata file under a
2931 'volumes' key, as there was no subvolume namespace. That key doesn't make sense
2932 with mgr/volumes. This test validates the transparent update of the 'volumes'
2933 key to the 'subvolumes' key in the auth metadata file during authorize.
2934 """
2935
2936 guest_mount = self.mount_b
2937
2938 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
2939 group = self._generate_random_group_name()
2940
2941 auth_id = "guest1"
2942 guestclient_1 = {
2943 "auth_id": auth_id,
2944 "tenant_id": "tenant1",
2945 }
2946
2947 # create group
2948 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2949
2950 # create subvolumes in group
2951 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
2952 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
2953
2954 # Authorize 'guestclient_1' to access the subvolume1.
2955 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
2956 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2957
2958 # Check that the auth metadata file for auth ID 'guest1' is
2959 # created on authorizing 'guest1' access to subvolume1.
2960 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2961 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2962
2963 # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
2964 guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
2965
2966 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
2967 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
2968 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2969
2970 expected_auth_metadata = {
2971 "version": 5,
2972 "compat_version": 6,
2973 "dirty": False,
2974 "tenant_id": "tenant1",
2975 "subvolumes": {
2976 "{0}/{1}".format(group,subvolume1): {
2977 "dirty": False,
2978 "access_level": "rw"
2979 },
2980 "{0}/{1}".format(group,subvolume2): {
2981 "dirty": False,
2982 "access_level": "rw"
2983 }
2984 }
2985 }
2986
2987 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
2988
2989 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
2990 del expected_auth_metadata["version"]
2991 del auth_metadata["version"]
2992 self.assertEqual(expected_auth_metadata, auth_metadata)
2993
2994 # clean up
2995 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
2996 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
2997 guest_mount.umount_wait()
2998 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2999 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3000 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
3001 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3002
3003 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
3004 """
3005 CephVolumeClient stores subvolume data in the auth metadata file under a
3006 'volumes' key, as there was no subvolume namespace. That key doesn't make sense
3007 with mgr/volumes. This test validates the transparent update of the 'volumes'
3008 key to the 'subvolumes' key in the auth metadata file during deauthorize.
3009 """
3010
3011 guest_mount = self.mount_b
3012
3013 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
3014 group = self._generate_random_group_name()
3015
3016 auth_id = "guest1"
3017 guestclient_1 = {
3018 "auth_id": auth_id,
3019 "tenant_id": "tenant1",
3020 }
3021
3022 # create group
3023 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3024
3025 # create subvolumes in group
3026 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
3027 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
3028
3029 # Authorize 'guestclient_1' to access the subvolume1.
3030 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
3031 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3032
3033 # Authorize 'guestclient_1' to access the subvolume2.
3034 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
3035 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3036
3037 # Check that the auth metadata file for auth ID 'guest1' is created.
3038 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
3039 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
3040
3041 # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
3042 guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
3043
3044 # Deauthorize 'guestclient_1' from subvolume2. This should transparently update 'volumes' to 'subvolumes'
3045 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
3046
3047 expected_auth_metadata = {
3048 "version": 5,
3049 "compat_version": 6,
3050 "dirty": False,
3051 "tenant_id": "tenant1",
3052 "subvolumes": {
3053 "{0}/{1}".format(group,subvolume1): {
3054 "dirty": False,
3055 "access_level": "rw"
3056 }
3057 }
3058 }
3059
3060 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
3061
3062 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
3063 del expected_auth_metadata["version"]
3064 del auth_metadata["version"]
3065 self.assertEqual(expected_auth_metadata, auth_metadata)
3066
3067 # clean up
3068 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
3069 guest_mount.umount_wait()
3070 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
3071 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3072 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
3073 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3074
3075 def test_subvolume_evict_client(self):
3076 """
3077 That a subvolume client can be evicted based on the auth ID
3078 """
3079
3080 subvolumes = self._generate_random_subvolume_name(2)
3081 group = self._generate_random_group_name()
3082
3083 # create group
3084 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3085
3086 # mounts[0] and mounts[1] will be used as guests to mount the subvolumes.
3087 for i in range(0, 2):
3088 self.mounts[i].umount_wait()
3089 guest_mounts = (self.mounts[0], self.mounts[1])
3090 auth_id = "guest"
3091 guestclient_1 = {
3092 "auth_id": auth_id,
3093 "tenant_id": "tenant1",
3094 }
3095
3096 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
3097 # subvolumes. Mount the two subvolumes. Write data to each subvolume.
3098 for i in range(2):
3099 # Create subvolume.
3100 self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")
3101
3102 # authorize guest authID read-write access to subvolume
3103 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
3104 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3105
3106 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
3107 "--group_name", group).rstrip()
3108 # configure credentials for guest client
3109 self._configure_guest_auth(guest_mounts[i], auth_id, key)
3110
3111 # mount the subvolume, and write to it
3112 guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
3113 guest_mounts[i].write_n_mb("data.bin", 1)
3114
3115 # Evict guest_mounts[0], the guest client using auth ID 'guest' that has
3116 # mounted one of the subvolumes.
3117 self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)
3118
3119 # The evicted guest client, guest_mounts[0], should not be able to do
3120 # any more metadata ops. It should start failing all operations
3121 # when it sees that its own address is in the blocklist.
3122 try:
3123 guest_mounts[0].write_n_mb("rogue.bin", 1)
3124 except CommandFailedError:
3125 pass
3126 else:
3127 raise RuntimeError("post-eviction write should have failed!")
3128
3129 # The blocklisted guest client should still be able to unmount cleanly
3130 guest_mounts[0].umount_wait()
3131
3132 # Guest client guest_mounts[1], which uses the same auth ID 'guest' but
3133 # has mounted the other subvolume, should be able to use its subvolume
3134 # unaffected.
3135 guest_mounts[1].write_n_mb("data.bin.1", 1)
3136
3137 # Cleanup.
3138 guest_mounts[1].umount_wait()
3139 for i in range(2):
3140 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
3141 self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
3142 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3143
3144 def test_subvolume_pin_random(self):
3145 self.fs.set_max_mds(2)
3146 self.fs.wait_for_daemons()
3147 self.config_set('mds', 'mds_export_ephemeral_random', True)
3148
3149 subvolume = self._generate_random_subvolume_name()
3150 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3151 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
3152 # no verification
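# Hedged note: the 'random' pin sets the ephemeral random pin policy on the
# subvolume directory (probability 0.01 here); which subtrees actually get
# pinned is probabilistic, hence no verification is attempted.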
3153
3154 # remove subvolume
3155 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3156
3157 # verify trash dir is clean
3158 self._wait_for_trash_empty()
3159
3160 def test_subvolume_resize_fail_invalid_size(self):
3161 """
3162 That a subvolume cannot be resized to an invalid size and that the quota does not change
3163 """
3164
3165 osize = self.DEFAULT_FILE_SIZE*1024*1024
3166 # create subvolume
3167 subvolname = self._generate_random_subvolume_name()
3168 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3169
3170 # make sure it exists
3171 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3172 self.assertNotEqual(subvolpath, None)
3173
3174 # try to resize the subvolume with an invalid size -10
3175 nsize = -10
3176 try:
3177 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3178 except CommandFailedError as ce:
3179 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3180 else:
3181 self.fail("expected the 'fs subvolume resize' command to fail")
3182
3183 # verify the quota did not change
3184 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3185 self.assertEqual(size, osize)
3186
3187 # remove subvolume
3188 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3189
3190 # verify trash dir is clean
3191 self._wait_for_trash_empty()
3192
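# A hedged convenience sketch, not part of the original suite: the quota the
# resize tests assert on is just the 'ceph.quota.max_bytes' xattr on the
# subvolume path, read via the mount's getfattr() helper as above.
def _get_quota_bytes(self, subvolpath):
# returns None when no quota is set (xattr absent), else the limit in bytes
size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
return None if size is None else int(size)
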
3193 def test_subvolume_resize_fail_zero_size(self):
3194 """
3195 That a subvolume cannot be resized to a zero size and that the quota does not change
3196 """
3197
3198 osize = self.DEFAULT_FILE_SIZE*1024*1024
3199 # create subvolume
3200 subvolname = self._generate_random_subvolume_name()
3201 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3202
3203 # make sure it exists
3204 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3205 self.assertNotEqual(subvolpath, None)
3206
3207 # try to resize the subvolume with size 0
3208 nsize = 0
3209 try:
3210 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3211 except CommandFailedError as ce:
3212 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3213 else:
3214 self.fail("expected the 'fs subvolume resize' command to fail")
3215
3216 # verify the quota did not change
3217 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3218 self.assertEqual(size, osize)
3219
3220 # remove subvolume
3221 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3222
3223 # verify trash dir is clean
3224 self._wait_for_trash_empty()
3225
3226 def test_subvolume_resize_quota_lt_used_size(self):
3227 """
3228 That a subvolume can be resized to a size smaller than the current used size
3229 and the resulting quota matches the expected size.
3230 """
3231
3232 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
3233 # create subvolume
3234 subvolname = self._generate_random_subvolume_name()
3235 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3236
3237 # make sure it exists
3238 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3239 self.assertNotEqual(subvolpath, None)
3240
3241 # create one file of 10MB
3242 file_size=self.DEFAULT_FILE_SIZE*10
3243 number_of_files=1
3244 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3245 number_of_files,
3246 file_size))
3247 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
3248 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3249
3250 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
3251 susedsize = int(self.mount_a.run_shell(['stat', '-c', '%s', subvolpath]).stdout.getvalue().strip())
3252 if isinstance(self.mount_a, FuseMount):
3253 # only check on FUSE mounts: a kclient dir does not report size==rbytes
3254 self.assertEqual(usedsize, susedsize)
3255
3256 # shrink the subvolume
3257 nsize = usedsize // 2
3258 try:
3259 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3260 except CommandFailedError:
3261 self.fail("expected the 'fs subvolume resize' command to succeed")
3262
3263 # verify the quota
3264 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3265 self.assertEqual(size, nsize)
3266
3267 # remove subvolume
3268 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3269
3270 # verify trash dir is clean
3271 self._wait_for_trash_empty()
3272
3273 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
3274 """
3275 That a subvolume cannot be resized to a size smaller than the current used size
3276 when --no_shrink is given, and that the quota does not change.
3277 """
3278
3279 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
3280 # create subvolume
3281 subvolname = self._generate_random_subvolume_name()
3282 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3283
3284 # make sure it exists
3285 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3286 self.assertNotEqual(subvolpath, None)
3287
3288 # create one file of 10MB
3289 file_size=self.DEFAULT_FILE_SIZE*10
3290 number_of_files=1
3291 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3292 number_of_files,
3293 file_size))
3294 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
3295 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3296
3297 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
3298 susedsize = int(self.mount_a.run_shell(['stat', '-c', '%s', subvolpath]).stdout.getvalue().strip())
3299 if isinstance(self.mount_a, FuseMount):
3300 # only check on FUSE mounts: a kclient dir does not report size==rbytes
3301 self.assertEqual(usedsize, susedsize)
3302
3303 # shrink the subvolume
3304 nsize = usedsize // 2
3305 try:
3306 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
3307 except CommandFailedError as ce:
3308 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3309 else:
3310 self.fail("expected the 'fs subvolume resize' command to fail")
3311
3312 # verify the quota did not change
3313 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3314 self.assertEqual(size, osize)
3315
3316 # remove subvolume
3317 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3318
3319 # verify trash dir is clean
3320 self._wait_for_trash_empty()
3321
3322 def test_subvolume_resize_expand_on_full_subvolume(self):
3323 """
3324 That a full subvolume can be expanded in size and future writes succeed.
3325 """
3326
3327 osize = self.DEFAULT_FILE_SIZE*1024*1024*10
3328 # create subvolume of quota 10MB and make sure it exists
3329 subvolname = self._generate_random_subvolume_name()
3330 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3331 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3332 self.assertNotEqual(subvolpath, None)
3333
3334 # create one file of size 10MB and write
3335 file_size=self.DEFAULT_FILE_SIZE*10
3336 number_of_files=1
3337 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3338 number_of_files,
3339 file_size))
3340 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
3341 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3342
3343 # create a file of size 5MB and try write more
3344 file_size=file_size // 2
3345 number_of_files=1
3346 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3347 number_of_files,
3348 file_size))
3349 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
3350 try:
3351 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3352 except CommandFailedError:
3353 # Could not write, so expand the subvolume further and try writing the 5MB file again
3354 nsize = osize*2
3355 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3356 try:
3357 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3358 except CommandFailedError:
3359 self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3360 "to succeed".format(subvolname, number_of_files, file_size))
3361 else:
3362 self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3363 "to fail".format(subvolname, number_of_files, file_size))
3364
3365 # remove subvolume
3366 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3367
3368 # verify trash dir is clean
3369 self._wait_for_trash_empty()
3370
3371 def test_subvolume_resize_infinite_size(self):
3372 """
3373 That a subvolume can be resized to an infinite size by unsetting its quota.
3374 """
3375
3376 # create subvolume
3377 subvolname = self._generate_random_subvolume_name()
3378 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
3379 str(self.DEFAULT_FILE_SIZE*1024*1024))
3380
3381 # make sure it exists
3382 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3383 self.assertNotEqual(subvolpath, None)
3384
3385 # resize inf
3386 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
3387
3388 # verify that the quota is None
3389 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
3390 self.assertEqual(size, None)
3391
3392 # remove subvolume
3393 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3394
3395 # verify trash dir is clean
3396 self._wait_for_trash_empty()
3397
3398 def test_subvolume_resize_infinite_size_future_writes(self):
3399 """
3400 That a subvolume can be resized to an infinite size and future writes succeed.
3401 """
3402
3403 # create subvolume
3404 subvolname = self._generate_random_subvolume_name()
3405 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
3406 str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")
3407
3408 # make sure it exists
3409 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3410 self.assertNotEqual(subvolpath, None)
3411
3412 # resize inf
3413 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
3414
3415 # verify that the quota is None
3416 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
3417 self.assertEqual(size, None)
3418
3419 # create one file of 10MB and try to write
3420 file_size=self.DEFAULT_FILE_SIZE*10
3421 number_of_files=1
3422 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3423 number_of_files,
3424 file_size))
3425 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
3426
3427 try:
3428 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3429 except CommandFailedError:
3430 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3431 "to succeed".format(subvolname, number_of_files, file_size))
3432
3433 # remove subvolume
3434 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3435
3436 # verify trash dir is clean
3437 self._wait_for_trash_empty()
3438
3439 def test_subvolume_rm_force(self):
3440 # test removing non-existing subvolume with --force
3441 subvolume = self._generate_random_subvolume_name()
3442 try:
3443 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
3444 except CommandFailedError:
3445 self.fail("expected the 'fs subvolume rm --force' command to succeed")
3446
3447 def test_subvolume_exists_with_subvolumegroup_and_subvolume(self):
3448 """Test the presence of any subvolume by specifying the name of subvolumegroup"""
3449
3450 group = self._generate_random_group_name()
3451 subvolume1 = self._generate_random_subvolume_name()
3452 # create subvolumegroup
3453 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3454 # create subvolume in group
3455 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
3456 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3457 self.assertEqual(ret.strip('\n'), "subvolume exists")
3458 # delete subvolume in group
3459 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3460 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3461 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3462 # delete subvolumegroup
3463 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3464
3465 def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self):
3466 """Test the presence of any subvolume specifying the name
3467 of subvolumegroup and no subvolumes"""
3468
3469 group = self._generate_random_group_name()
3470 # create subvolumegroup
3471 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3472 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3473 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3474 # delete subvolumegroup
3475 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3476
3477 def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self):
3478 """Test the presence of any subvolume without specifying the name
3479 of subvolumegroup"""
3480
3481 subvolume1 = self._generate_random_subvolume_name()
3482 # create subvolume
3483 self._fs_cmd("subvolume", "create", self.volname, subvolume1)
3484 ret = self._fs_cmd("subvolume", "exist", self.volname)
3485 self.assertEqual(ret.strip('\n'), "subvolume exists")
3486 # delete subvolume
3487 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
3488 ret = self._fs_cmd("subvolume", "exist", self.volname)
3489 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3490
3491 def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self):
3492 """Test the presence of any subvolume without any subvolumegroup
3493 and without any subvolume"""
3494
3495 ret = self._fs_cmd("subvolume", "exist", self.volname)
3496 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3497
3498 def test_subvolume_shrink(self):
3499 """
3500 That a subvolume can be shrunk in size and its quota matches the expected size.
3501 """
3502
3503 # create subvolume
3504 subvolname = self._generate_random_subvolume_name()
3505 osize = self.DEFAULT_FILE_SIZE*1024*1024
3506 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3507
3508 # make sure it exists
3509 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3510 self.assertNotEqual(subvolpath, None)
3511
3512 # shrink the subvolume
3513 nsize = osize // 2
3514 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3515
3516 # verify the quota
3517 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3518 self.assertEqual(size, nsize)
3519
3520 # remove subvolume
3521 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3522
3523 # verify trash dir is clean
3524 self._wait_for_trash_empty()
3525
3526 def test_subvolume_retain_snapshot_rm_idempotency(self):
3527 """
3528 Ensure that deleting a subvolume which was already deleted with the retain-snapshots option passes.
3529 After a deletion with retained snapshots, the subvolume exists until the trash directory (which resides
3530 inside the subvolume) is cleaned up. A subvolume deletion issued while the trash directory is not empty
3531 should pass and should not error out with EAGAIN.
3532 """
3533 subvolume = self._generate_random_subvolume_name()
3534 snapshot = self._generate_random_snapshot_name()
3535
3536 # create subvolume
3537 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
3538
3539 # do some IO
3540 self._do_subvolume_io(subvolume, number_of_files=256)
3541
3542 # snapshot subvolume
3543 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3544
3545 # remove with snapshot retention
3546 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
3547
3548 # remove snapshots (removes retained volume)
3549 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3550
3551 # remove subvolume (check idempotency)
3552 try:
3553 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3554 except CommandFailedError as ce:
3555 if ce.exitstatus != errno.ENOENT:
3556 self.fail(f"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
3557
3558 # verify trash dir is clean
3559 self._wait_for_trash_empty()
3560
3561
3562 def test_subvolume_user_metadata_set(self):
3563 subvolname = self._generate_random_subvolume_name()
3564 group = self._generate_random_group_name()
3565
3566 # create group.
3567 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3568
3569 # create subvolume in group.
3570 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3571
3572 # set metadata for subvolume.
3573 key = "key"
3574 value = "value"
3575 try:
3576 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3577 except CommandFailedError:
3578 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3579
3580 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3581 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3582
3583 # verify trash dir is clean.
3584 self._wait_for_trash_empty()
3585
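# Hedged helper sketch, not part of the original suite: the metadata tests
# below all drive 'fs subvolume metadata set/get' the same way; this wraps
# that round trip using the same _fs_cmd plumbing.
def _set_and_get_user_metadata(self, subvolname, group, key, value):
# set, then read back and strip the trailing newline the CLI appends
self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
return self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group).strip('\n')
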
3586 def test_subvolume_user_metadata_set_idempotence(self):
3587 subvolname = self._generate_random_subvolume_name()
3588 group = self._generate_random_group_name()
3589
3590 # create group.
3591 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3592
3593 # create subvolume in group.
3594 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3595
3596 # set metadata for subvolume.
3597 key = "key"
3598 value = "value"
3599 try:
3600 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3601 except CommandFailedError:
3602 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3603
3604 # set same metadata again for subvolume.
3605 try:
3606 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3607 except CommandFailedError:
3608 self.fail("expected the 'fs subvolume metadata set' command to succeed because it is an idempotent operation")
3609
3610 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3611 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3612
3613 # verify trash dir is clean.
3614 self._wait_for_trash_empty()
3615
3616 def test_subvolume_user_metadata_get(self):
3617 subvolname = self._generate_random_subvolume_name()
3618 group = self._generate_random_group_name()
3619
3620 # create group.
3621 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3622
3623 # create subvolume in group.
3624 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3625
3626 # set metadata for subvolume.
3627 key = "key"
3628 value = "value"
3629 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3630
3631 # get value for specified key.
3632 try:
3633 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3634 except CommandFailedError:
3635 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3636
3637 # remove '\n' from returned value.
3638 ret = ret.strip('\n')
3639
3640 # match received value with expected value.
3641 self.assertEqual(value, ret)
3642
3643 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3644 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3645
3646 # verify trash dir is clean.
3647 self._wait_for_trash_empty()
3648
3649 def test_subvolume_user_metadata_get_for_nonexisting_key(self):
3650 subvolname = self._generate_random_subvolume_name()
3651 group = self._generate_random_group_name()
3652
3653 # create group.
3654 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3655
3656 # create subvolume in group.
3657 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3658
3659 # set metadata for subvolume.
3660 key = "key"
3661 value = "value"
3662 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3663
3664 # try to get value for nonexisting key
3665 # Expecting ENOENT exit status because key does not exist
3666 try:
3667 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_nonexist", "--group_name", group)
3668 except CommandFailedError as e:
3669 self.assertEqual(e.exitstatus, errno.ENOENT)
3670 else:
3671 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
3672
3673 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3674 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3675
3676 # verify trash dir is clean.
3677 self._wait_for_trash_empty()
3678
3679 def test_subvolume_user_metadata_get_for_nonexisting_section(self):
3680 subvolname = self._generate_random_subvolume_name()
3681 group = self._generate_random_group_name()
3682
3683 # create group.
3684 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3685
3686 # create subvolume in group.
3687 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3688
3689 # try to get value for nonexisting key (as section does not exist)
3690 # Expecting ENOENT exit status because key does not exist
3691 try:
3692 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key", "--group_name", group)
3693 except CommandFailedError as e:
3694 self.assertEqual(e.exitstatus, errno.ENOENT)
3695 else:
3696 self.fail("Expected ENOENT because section does not exist")
3697
3698 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3699 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3700
3701 # verify trash dir is clean.
3702 self._wait_for_trash_empty()
3703
3704 def test_subvolume_user_metadata_update(self):
3705 subvolname = self._generate_random_subvolume_name()
3706 group = self._generate_random_group_name()
3707
3708 # create group.
3709 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3710
3711 # create subvolume in group.
3712 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3713
3714 # set metadata for subvolume.
3715 key = "key"
3716 value = "value"
3717 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3718
3719 # update metadata against key.
3720 new_value = "new_value"
3721 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, new_value, "--group_name", group)
3722
3723 # get metadata for specified key of subvolume.
3724 try:
3725 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3726 except CommandFailedError:
3727 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3728
3729 # remove '\n' from returned value.
3730 ret = ret.strip('\n')
3731
3732 # match received value with expected value.
3733 self.assertEqual(new_value, ret)
3734
3735 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3736 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3737
3738 # verify trash dir is clean.
3739 self._wait_for_trash_empty()
3740
3741 def test_subvolume_user_metadata_list(self):
3742 subvolname = self._generate_random_subvolume_name()
3743 group = self._generate_random_group_name()
3744
3745 # create group.
3746 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3747
3748 # create subvolume in group.
3749 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3750
3751 # set metadata for subvolume.
3752 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
3753
3754 for k, v in input_metadata_dict.items():
3755 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)
3756
3757 # list metadata
3758 try:
3759 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
3760 except CommandFailedError:
3761 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
3762
3763 ret_dict = json.loads(ret)
3764
3765 # compare output with expected output
3766 self.assertDictEqual(input_metadata_dict, ret_dict)
3767
3768 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3769 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3770
3771 # verify trash dir is clean.
3772 self._wait_for_trash_empty()
3773
3774 def test_subvolume_user_metadata_list_if_no_metadata_set(self):
3775 subvolname = self._generate_random_subvolume_name()
3776 group = self._generate_random_group_name()
3777
3778 # create group.
3779 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3780
3781 # create subvolume in group.
3782 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3783
3784 # list metadata
3785 try:
3786 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
3787 except CommandFailedError:
3788 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
3789
3790 # remove '\n' from returned value.
3791 ret = ret.strip('\n')
3792
3793 # compare output with expected output
3794 # expecting empty json/dictionary
3795 self.assertEqual(ret, "{}")
3796
3797 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3798 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3799
3800 # verify trash dir is clean.
3801 self._wait_for_trash_empty()
3802
3803 def test_subvolume_user_metadata_remove(self):
3804 subvolname = self._generate_random_subvolume_name()
3805 group = self._generate_random_group_name()
3806
3807 # create group.
3808 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3809
3810 # create subvolume in group.
3811 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3812
3813 # set metadata for subvolume.
3814 key = "key"
3815 value = "value"
3816 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3817
3818 # remove metadata against specified key.
3819 try:
3820 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
3821 except CommandFailedError:
3822 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3823
3824 # confirm key is removed by again fetching metadata
3825 try:
3826 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3827 except CommandFailedError as e:
3828 self.assertEqual(e.exitstatus, errno.ENOENT)
3829 else:
3830 self.fail("Expected ENOENT because key does not exist")
3831
3832 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3833 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3834
3835 # verify trash dir is clean.
3836 self._wait_for_trash_empty()
3837
3838 def test_subvolume_user_metadata_remove_for_nonexisting_key(self):
3839 subvolname = self._generate_random_subvolume_name()
3840 group = self._generate_random_group_name()
3841
3842 # create group.
3843 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3844
3845 # create subvolume in group.
3846 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3847
3848 # set metadata for subvolume.
3849 key = "key"
3850 value = "value"
3851 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3852
3853 # try to remove value for nonexisting key
3854 # Expecting ENOENT exit status because key does not exist
3855 try:
3856 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_nonexist", "--group_name", group)
3857 except CommandFailedError as e:
3858 self.assertEqual(e.exitstatus, errno.ENOENT)
3859 else:
3860 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
3861
3862 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3863 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3864
3865 # verify trash dir is clean.
3866 self._wait_for_trash_empty()
3867
3868 def test_subvolume_user_metadata_remove_for_nonexisting_section(self):
3869 subvolname = self._generate_random_subvolume_name()
3870 group = self._generate_random_group_name()
3871
3872 # create group.
3873 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3874
3875 # create subvolume in group.
3876 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3877
3878 # try to remove value for nonexisting key (as section does not exist)
3879 # Expecting ENOENT exit status because key does not exist
3880 try:
3881 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key", "--group_name", group)
3882 except CommandFailedError as e:
3883 self.assertEqual(e.exitstatus, errno.ENOENT)
3884 else:
3885 self.fail("Expected ENOENT because section does not exist")
3886
3887 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3888 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3889
3890 # verify trash dir is clean.
3891 self._wait_for_trash_empty()
3892
3893 def test_subvolume_user_metadata_remove_force(self):
3894 subvolname = self._generate_random_subvolume_name()
3895 group = self._generate_random_group_name()
3896
3897 # create group.
3898 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3899
3900 # create subvolume in group.
3901 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3902
3903 # set metadata for subvolume.
3904 key = "key"
3905 value = "value"
3906 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3907
3908 # remove metadata against specified key with --force option.
3909 try:
3910 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
3911 except CommandFailedError:
3912 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3913
3914 # confirm key is removed by again fetching metadata
3915 try:
3916 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3917 except CommandFailedError as e:
3918 self.assertEqual(e.exitstatus, errno.ENOENT)
3919 else:
3920 self.fail("Expected ENOENT because key does not exist")
3921
3922 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3923 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3924
3925 # verify trash dir is clean.
3926 self._wait_for_trash_empty()
3927
3928 def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self):
3929 subvolname = self._generate_random_subvolume_name()
3930 group = self._generate_random_group_name()
3931
3932 # create group.
3933 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3934
3935 # create subvolume in group.
3936 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3937
3938 # set metadata for subvolume.
3939 key = "key"
3940 value = "value"
3941 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3942
3943 # remove metadata against specified key.
3944 try:
3945 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
3946 except CommandFailedError:
3947 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3948
3949 # confirm key is removed by again fetching metadata
3950 try:
3951 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3952 except CommandFailedError as e:
3953 self.assertEqual(e.exitstatus, errno.ENOENT)
3954 else:
3955 self.fail("Expected ENOENT because key does not exist")
3956
3957 # again remove metadata against already removed key with --force option.
3958 try:
3959 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
3960 except CommandFailedError:
3961 self.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")
3962
3963 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3964 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3965
3966 # verify trash dir is clean.
3967 self._wait_for_trash_empty()
3968
3969 def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self):
3970 subvolname = self._generate_random_subvolume_name()
3971 group = self._generate_random_group_name()
3972
3973 # emulate an old-fashioned subvolume in a custom group
3974 createpath = os.path.join(".", "volumes", group, subvolname)
3975 self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
3976
3977 # set metadata for subvolume.
3978 key = "key"
3979 value = "value"
3980 try:
3981 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3982 except CommandFailedError:
3983 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3984
3985 # get value for specified key.
3986 try:
3987 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3988 except CommandFailedError:
3989 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3990
3991 # remove '\n' from returned value.
3992 ret = ret.strip('\n')
3993
3994 # match received value with expected value.
3995 self.assertEqual(value, ret)
3996
3997 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3998 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3999
4000 # verify trash dir is clean.
4001 self._wait_for_trash_empty()
4002
4003 def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self):
4004 subvolname = self._generate_random_subvolume_name()
4005 group = self._generate_random_group_name()
4006
4007 # emulate an old-fashioned subvolume in a custom group
4008 createpath = os.path.join(".", "volumes", group, subvolname)
4009 self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
4010
4011 # set metadata for subvolume.
4012 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
4013
4014 for k, v in input_metadata_dict.items():
4015 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)
4016
4017 # list metadata
4018 try:
4019 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
4020 except CommandFailedError:
4021 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
4022
4023 ret_dict = json.loads(ret)
4024
4025 # compare output with expected output
4026 self.assertDictEqual(input_metadata_dict, ret_dict)
4027
4028 # remove metadata against specified key.
4029 try:
4030 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_1", "--group_name", group)
4031 except CommandFailedError:
4032 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
4033
4034 # confirm key is removed by again fetching metadata
4035 try:
4036 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_1", "--group_name", group)
4037 except CommandFailedError as e:
4038 self.assertEqual(e.exitstatus, errno.ENOENT)
4039 else:
4040 self.fail("Expected ENOENT because key_1 does not exist")
4041
4042 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4043 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4044
4045 # verify trash dir is clean.
4046 self._wait_for_trash_empty()
4047
4048 class TestSubvolumeGroupSnapshots(TestVolumesHelper):
4049 """Tests for FS subvolume group snapshot operations."""
4050 @unittest.skip("skipping subvolumegroup snapshot tests")
4051 def test_nonexistent_subvolume_group_snapshot_rm(self):
4052 subvolume = self._generate_random_subvolume_name()
4053 group = self._generate_random_group_name()
4054 snapshot = self._generate_random_snapshot_name()
4055
4056 # create group
4057 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4058
4059 # create subvolume in group
4060 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4061
4062 # snapshot group
4063 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4064
4065 # remove snapshot
4066 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4067
4068 # remove snapshot
4069 try:
4070 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4071 except CommandFailedError as ce:
4072 if ce.exitstatus != errno.ENOENT:
4073 raise
4074 else:
4075 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")
4076
4077 # remove subvolume
4078 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4079
4080 # verify trash dir is clean
4081 self._wait_for_trash_empty()
4082
4083 # remove group
4084 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4085
4086 @unittest.skip("skipping subvolumegroup snapshot tests")
4087 def test_subvolume_group_snapshot_create_and_rm(self):
4088 subvolume = self._generate_random_subvolume_name()
4089 group = self._generate_random_group_name()
4090 snapshot = self._generate_random_snapshot_name()
4091
4092 # create group
4093 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4094
4095 # create subvolume in group
4096 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4097
4098 # snapshot group
4099 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4100
4101 # remove snapshot
4102 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4103
4104 # remove subvolume
4105 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4106
4107 # verify trash dir is clean
4108 self._wait_for_trash_empty()
4109
4110 # remove group
4111 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4112
4113 @unittest.skip("skipping subvolumegroup snapshot tests")
4114 def test_subvolume_group_snapshot_idempotence(self):
4115 subvolume = self._generate_random_subvolume_name()
4116 group = self._generate_random_group_name()
4117 snapshot = self._generate_random_snapshot_name()
4118
4119 # create group
4120 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4121
4122 # create subvolume in group
4123 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4124
4125 # snapshot group
4126 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4127
4128 # try creating a snapshot w/ the same snapshot name -- should be idempotent
4129 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4130
4131 # remove snapshot
4132 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4133
4134 # remove subvolume
4135 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4136
4137 # verify trash dir is clean
4138 self._wait_for_trash_empty()
4139
4140 # remove group
4141 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4142
4143 @unittest.skip("skipping subvolumegroup snapshot tests")
4144 def test_subvolume_group_snapshot_ls(self):
4145 # tests the 'fs subvolumegroup snapshot ls' command
4146
4147 snapshots = []
4148
4149 # create group
4150 group = self._generate_random_group_name()
4151 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4152
4153 # create subvolumegroup snapshots
4154 snapshots = self._generate_random_snapshot_name(3)
4155 for snapshot in snapshots:
4156 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4157
4158 subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
4159 if len(subvolgrpsnapshotls) == 0:
4160 raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
4161 else:
4162 snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
4163 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
4164 raise RuntimeError("Error creating or listing subvolume group snapshots")
4165
4166 @unittest.skip("skipping subvolumegroup snapshot tests")
4167 def test_subvolume_group_snapshot_rm_force(self):
4168 # test removing non-existing subvolume group snapshot with --force
4169 group = self._generate_random_group_name()
4170 snapshot = self._generate_random_snapshot_name()
4171 # remove snapshot
4172 try:
4173 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
4174 except CommandFailedError:
4175 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")
4176
4177 def test_subvolume_group_snapshot_unsupported_status(self):
4178 group = self._generate_random_group_name()
4179 snapshot = self._generate_random_snapshot_name()
4180
4181 # create group
4182 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4183
4184 # snapshot group
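# subvolumegroup snapshots are currently unsupported, so the create below is expected to fail with ENOSYS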
4185 try:
4186 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4187 except CommandFailedError as ce:
4188 self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
4189 else:
4190 self.fail("expected subvolumegroup snapshot create command to fail")
4191
4192 # remove group
4193 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4194
4195
4196 class TestSubvolumeSnapshots(TestVolumesHelper):
4197 """Tests for FS subvolume snapshot operations."""
4198 def test_nonexistent_subvolume_snapshot_rm(self):
4199 subvolume = self._generate_random_subvolume_name()
4200 snapshot = self._generate_random_snapshot_name()
4201
4202 # create subvolume
4203 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4204
4205 # snapshot subvolume
4206 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4207
4208 # remove snapshot
4209 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4210
4211 # remove snapshot again
4212 try:
4213 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4214 except CommandFailedError as ce:
4215 if ce.exitstatus != errno.ENOENT:
4216 raise
4217 else:
4218 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
4219
4220 # remove subvolume
4221 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4222
4223 # verify trash dir is clean
4224 self._wait_for_trash_empty()
4225
4226 def test_subvolume_snapshot_create_and_rm(self):
4227 subvolume = self._generate_random_subvolume_name()
4228 snapshot = self._generate_random_snapshot_name()
4229
4230 # create subvolume
4231 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4232
4233 # snapshot subvolume
4234 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4235
4236 # remove snapshot
4237 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4238
4239 # remove subvolume
4240 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4241
4242 # verify trash dir is clean
4243 self._wait_for_trash_empty()
4244
4245 def test_subvolume_snapshot_create_idempotence(self):
4246 subvolume = self._generate_random_subvolume_name()
4247 snapshot = self._generate_random_snapshot_name()
4248
4249 # create subvolume
4250 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4251
4252 # snapshot subvolume
4253 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4254
4255 # try creating w/ same subvolume snapshot name -- should be idempotent
4256 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4257
4258 # remove snapshot
4259 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4260
4261 # remove subvolume
4262 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4263
4264 # verify trash dir is clean
4265 self._wait_for_trash_empty()
4266
4267 def test_subvolume_snapshot_info(self):
4268
4269 """
4270 tests the 'fs subvolume snapshot info' command
4271 """
4272
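# metadata keys expected in 'fs subvolume snapshot info' output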
4273 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4274
4275 subvolume = self._generate_random_subvolume_name()
4276 snapshot, snap_missing = self._generate_random_snapshot_name(2)
4277
4278 # create subvolume
4279 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
4280
4281 # do some IO
4282 self._do_subvolume_io(subvolume, number_of_files=1)
4283
4284 # snapshot subvolume
4285 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4286
4287 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
4288 for md in snap_md:
4289 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4290 self.assertEqual(snap_info["has_pending_clones"], "no")
4291
4292 # snapshot info for non-existent snapshot
4293 try:
4294 self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
4295 except CommandFailedError as ce:
4296 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
4297 else:
4298 self.fail("expected snapshot info of non-existent snapshot to fail")
4299
4300 # remove snapshot
4301 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4302
4303 # remove subvolume
4304 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4305
4306 # verify trash dir is clean
4307 self._wait_for_trash_empty()
4308
4309 def test_subvolume_snapshot_in_group(self):
4310 subvolume = self._generate_random_subvolume_name()
4311 group = self._generate_random_group_name()
4312 snapshot = self._generate_random_snapshot_name()
4313
4314 # create group
4315 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4316
4317 # create subvolume in group
4318 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4319
4320 # snapshot subvolume in group
4321 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
4322
4323 # remove snapshot
4324 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
4325
4326 # remove subvolume
4327 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4328
4329 # verify trash dir is clean
4330 self._wait_for_trash_empty()
4331
4332 # remove group
4333 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4334
4335 def test_subvolume_snapshot_ls(self):
4336 # tests the 'fs subvolume snapshot ls' command
4337
4338 snapshots = []
4339
4340 # create subvolume
4341 subvolume = self._generate_random_subvolume_name()
4342 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4343
4344 # create subvolume snapshots
4345 snapshots = self._generate_random_snapshot_name(3)
4346 for snapshot in snapshots:
4347 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4348
4349 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
4350 if len(subvolsnapshotls) == 0:
4351 self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
4352 else:
4353 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
4354 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
4355 self.fail("Error creating or listing subvolume snapshots")
4356
4357 # remove snapshot
4358 for snapshot in snapshots:
4359 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4360
4361 # remove subvolume
4362 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4363
4364 # verify trash dir is clean
4365 self._wait_for_trash_empty()
4366
4367 def test_subvolume_inherited_snapshot_ls(self):
4368 # tests that the 'fs subvolume snapshot ls' command does not list
4369 # inherited snapshots created as part of a snapshot taken at an
4370 # ancestral level
4371
4372 snapshots = []
4373 subvolume = self._generate_random_subvolume_name()
4374 group = self._generate_random_group_name()
4375 snap_count = 3
4376
4377 # create group
4378 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4379
4380 # create subvolume in group
4381 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4382
4383 # create subvolume snapshots
4384 snapshots = self._generate_random_snapshot_name(snap_count)
4385 for snapshot in snapshots:
4386 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
4387
4388 # Create snapshot at ancestral level
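# snapshots can be taken out-of-band by mkdir'ing under a directory's .snap; these are not managed by the volumes plugin and must not show up in 'snapshot ls'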
4389 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
4390 ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
4391 self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)
4392
4393 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
4394 self.assertEqual(len(subvolsnapshotls), snap_count)
4395
4396 # remove ancestral snapshots
4397 self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)
4398
4399 # remove snapshot
4400 for snapshot in snapshots:
4401 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
4402
4403 # remove subvolume
4404 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4405
4406 # verify trash dir is clean
4407 self._wait_for_trash_empty()
4408
4409 # remove group
4410 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4411
4412 def test_subvolume_inherited_snapshot_info(self):
4413 """
4414 tests that the 'fs subvolume snapshot info' command fails for
4415 inherited snapshots created as part of a snapshot taken at an
4416 ancestral level
4417 """
4418
4419 subvolume = self._generate_random_subvolume_name()
4420 group = self._generate_random_group_name()
4421
4422 # create group
4423 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4424
4425 # create subvolume in group
4426 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4427
4428 # Create snapshot at ancestral level
4429 ancestral_snap_name = "ancestral_snap_1"
4430 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
4431 self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
4432
4433 # Validate existence of inherited snapshot
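# CephFS exposes an ancestor's snapshot inside a descendant's .snap directory under the mangled name '_<snapname>_<ancestor inode number>'; reconstruct that name to locate the inherited snapshot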
4434 group_path = os.path.join(".", "volumes", group)
4435 inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c', '%i', group_path]).stdout.getvalue().strip())
4436 inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
4437 inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
4438 self.mount_a.run_shell(['ls', inherited_snappath])
4439
4440 # snapshot info on inherited snapshot
4441 try:
4442 self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
4443 except CommandFailedError as ce:
4444 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
4445 else:
4446 self.fail("expected snapshot info of inherited snapshot to fail")
4447
4448 # remove ancestral snapshots
4449 self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
4450
4451 # remove subvolume
4452 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
4453
4454 # verify trash dir is clean
4455 self._wait_for_trash_empty()
4456
4457 # remove group
4458 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4459
4460 def test_subvolume_inherited_snapshot_rm(self):
4461 """
4462 tests that the 'fs subvolume snapshot rm' command fails for
4463 inherited snapshots created as part of a snapshot taken at an
4464 ancestral level
4465 """
4466
4467 subvolume = self._generate_random_subvolume_name()
4468 group = self._generate_random_group_name()
4469
4470 # create group
4471 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4472
4473 # create subvolume in group
4474 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4475
4476 # Create snapshot at ancestral level
4477 ancestral_snap_name = "ancestral_snap_1"
4478 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
4479 self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
4480
4481 # Validate existence of inherited snap
4482 group_path = os.path.join(".", "volumes", group)
4483 inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c', '%i', group_path]).stdout.getvalue().strip())
4484 inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
4485 inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
4486 self.mount_a.run_shell(['ls', inherited_snappath])
4487
4488 # inherited snapshot should not be deletable
4489 try:
4490 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
4491 except CommandFailedError as ce:
4492 self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
4493 else:
4494 self.fail("expected removing inherited snapshot to fail")
4495
4496 # remove ancestral snapshots
4497 self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
4498
4499 # remove subvolume
4500 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4501
4502 # verify trash dir is clean
4503 self._wait_for_trash_empty()
4504
4505 # remove group
4506 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4507
4508 def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
4509 """
4510 tests the scenario where creation of subvolume snapshot name
4511 with same name as it's subvolumegroup snapshot name. This should
4512 fail.
4513 """
4514
4515 subvolume = self._generate_random_subvolume_name()
4516 group = self._generate_random_group_name()
4517 group_snapshot = self._generate_random_snapshot_name()
4518
4519 # create group
4520 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4521
4522 # create subvolume in group
4523 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4524
4525 # Create subvolumegroup snapshot
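# created out-of-band with mkdir, since the managed 'subvolumegroup snapshot create' command is currently unsupported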
4526 group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
4527 self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)
4528
4529 # Validate existence of subvolumegroup snapshot
4530 self.mount_a.run_shell(['ls', group_snapshot_path])
4531
4532 # Creation of a subvolume snapshot with the same name as its subvolumegroup snapshot should fail
4533 try:
4534 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
4535 except CommandFailedError as ce:
4536 self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
4537 else:
4538 self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
4539
4540 # remove subvolumegroup snapshot
4541 self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)
4542
4543 # remove subvolume
4544 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4545
4546 # verify trash dir is clean
4547 self._wait_for_trash_empty()
4548
4549 # remove group
4550 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4551
4552 def test_subvolume_retain_snapshot_invalid_recreate(self):
4553 """
4554 ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
4555 """
4556 subvolume = self._generate_random_subvolume_name()
4557 snapshot = self._generate_random_snapshot_name()
4558
4559 # create subvolume
4560 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4561
4562 # snapshot subvolume
4563 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4564
4565 # remove with snapshot retention
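# --retain-snapshots deletes the subvolume's data but keeps it around in the 'snapshot-retained' state for as long as snapshots remain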
4566 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4567
4568 # recreate subvolume with an invalid pool
4569 data_pool = "invalid_pool"
4570 try:
4571 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
4572 except CommandFailedError as ce:
4573 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
4574 else:
4575 self.fail("expected recreate of subvolume with invalid poolname to fail")
4576
4577 # fetch info
4578 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4579 self.assertEqual(subvol_info["state"], "snapshot-retained",
4580 msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
4581
4582 # getpath
4583 try:
4584 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
4585 except CommandFailedError as ce:
4586 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
4587 else:
4588 self.fail("expected getpath of subvolume with retained snapshots to fail")
4589
4590 # remove snapshot (should remove volume)
4591 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4592
4593 # verify trash dir is clean
4594 self._wait_for_trash_empty()
4595
4596 def test_subvolume_retain_snapshot_recreate_subvolume(self):
4597 """
4598 ensure a retained subvolume can be recreated and further snapshotted
4599 """
4600 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4601
4602 subvolume = self._generate_random_subvolume_name()
4603 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
4604
4605 # create subvolume
4606 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4607
4608 # snapshot subvolume
4609 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
4610
4611 # remove with snapshot retention
4612 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4613
4614 # fetch info
4615 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4616 self.assertEqual(subvol_info["state"], "snapshot-retained",
4617 msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
4618
4619 # recreate retained subvolume
4620 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4621
4622 # fetch info
4623 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4624 self.assertEqual(subvol_info["state"], "complete",
4625 msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
4626
4627 # snapshot info (older snapshot)
4628 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
4629 for md in snap_md:
4630 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4631 self.assertEqual(snap_info["has_pending_clones"], "no")
4632
4633 # snap-create (new snapshot)
4634 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
4635
4636 # remove with retain snapshots
4637 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4638
4639 # list snapshots
4640 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
4641 self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
4642 " created subvolume snapshots")
4643 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
4644 for snap in [snapshot1, snapshot2]:
4645 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
4646
4647 # remove snapshots (should remove volume)
4648 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
4649 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
4650
4651 # verify list subvolumes returns an empty list
4652 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4653 self.assertEqual(len(subvolumels), 0)
4654
4655 # verify trash dir is clean
4656 self._wait_for_trash_empty()
4657
4658 def test_subvolume_retain_snapshot_with_snapshots(self):
4659 """
4660 ensure retain-snapshots based delete of a subvolume with snapshots retains the subvolume
4661 also test allowed and disallowed operations on a retained subvolume
4662 """
4663 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4664
4665 subvolume = self._generate_random_subvolume_name()
4666 snapshot = self._generate_random_snapshot_name()
4667
4668 # create subvolume
4669 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4670
4671 # snapshot subvolume
4672 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4673
4674 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4675 try:
4676 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4677 except CommandFailedError as ce:
4678 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
4679 else:
4680 self.fail("expected rm of subvolume with retained snapshots to fail")
4681
4682 # remove with snapshot retention
4683 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4684
4685 # fetch info
4686 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4687 self.assertEqual(subvol_info["state"], "snapshot-retained",
4688 msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
4689
4690 ## test allowed ops in retained state
4691 # ls
4692 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4693 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
4694 self.assertEqual(subvolumes[0]['name'], subvolume,
4695 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
4696
4697 # snapshot info
4698 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
4699 for md in snap_md:
4700 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4701 self.assertEqual(snap_info["has_pending_clones"], "no")
4702
4703 # rm --force (allowed but should fail)
4704 try:
4705 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
4706 except CommandFailedError as ce:
4707 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
4708 else:
4709 self.fail("expected rm of subvolume with retained snapshots to fail")
4710
4711 # rm (allowed but should fail)
4712 try:
4713 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4714 except CommandFailedError as ce:
4715 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
4716 else:
4717 self.fail("expected rm of subvolume with retained snapshots to fail")
4718
4719 ## test disallowed ops
4720 # getpath
4721 try:
4722 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
4723 except CommandFailedError as ce:
4724 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
4725 else:
4726 self.fail("expected getpath of subvolume with retained snapshots to fail")
4727
4728 # resize
4729 nsize = self.DEFAULT_FILE_SIZE*1024*1024
4730 try:
4731 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
4732 except CommandFailedError as ce:
4733 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
4734 else:
4735 self.fail("expected resize of subvolume with retained snapshots to fail")
4736
4737 # snap-create
4738 try:
4739 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
4740 except CommandFailedError as ce:
4741 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
4742 else:
4743 self.fail("expected snapshot create of subvolume with retained snapshots to fail")
4744
4745 # remove snapshot (should remove volume)
4746 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4747
4748 # verify list subvolumes returns an empty list
4749 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4750 self.assertEqual(len(subvolumels), 0)
4751
4752 # verify trash dir is clean
4753 self._wait_for_trash_empty()
4754
4755 def test_subvolume_retain_snapshot_without_snapshots(self):
4756 """
4757 ensure retain-snapshots based delete of a subvolume with no snapshots deletes the subvolume
4758 """
4759 subvolume = self._generate_random_subvolume_name()
4760
4761 # create subvolume
4762 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4763
4764 # remove with snapshot retention (should remove volume, no snapshots to retain)
4765 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4766
4767 # verify list subvolumes returns an empty list
4768 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4769 self.assertEqual(len(subvolumels), 0)
4770
4771 # verify trash dir is clean
4772 self._wait_for_trash_empty()
4773
4774 def test_subvolume_retain_snapshot_trash_busy_recreate(self):
4775 """
4776 ensure retained subvolume recreate fails if its trash is not yet purged
4777 """
4778 subvolume = self._generate_random_subvolume_name()
4779 snapshot = self._generate_random_snapshot_name()
4780
4781 # create subvolume
4782 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4783
4784 # snapshot subvolume
4785 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4786
4787 # remove with snapshot retention
4788 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4789
4790 # fake a trash entry
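# _update_fake_trash plants a dummy entry in the subvolume's trash directory to simulate a purge that has not completed yet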
4791 self._update_fake_trash(subvolume)
4792
4793 # recreate subvolume
4794 try:
4795 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4796 except CommandFailedError as ce:
4797 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
4798 else:
4799 self.fail("expected recreate of subvolume with purge pending to fail")
4800
4801 # clear fake trash entry
4802 self._update_fake_trash(subvolume, create=False)
4803
4804 # recreate subvolume
4805 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4806
4807 # remove snapshot
4808 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4809
4810 # remove subvolume
4811 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4812
4813 # verify trash dir is clean
4814 self._wait_for_trash_empty()
4815
4816 def test_subvolume_rm_with_snapshots(self):
4817 subvolume = self._generate_random_subvolume_name()
4818 snapshot = self._generate_random_snapshot_name()
4819
4820 # create subvolume
4821 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4822
4823 # snapshot subvolume
4824 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4825
4826 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4827 try:
4828 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4829 except CommandFailedError as ce:
4830 if ce.exitstatus != errno.ENOTEMPTY:
4831 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
4832 else:
4833 raise RuntimeError("expected subvolume deletion to fail")
4834
4835 # remove snapshot
4836 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4837
4838 # remove subvolume
4839 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4840
4841 # verify trash dir is clean
4842 self._wait_for_trash_empty()
4843
4844 def test_subvolume_snapshot_protect_unprotect_sanity(self):
4845 """
4846 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
4847 invoking them does not cause errors, until they are removed in a subsequent release.
4848 """
4849 subvolume = self._generate_random_subvolume_name()
4850 snapshot = self._generate_random_snapshot_name()
4851 clone = self._generate_random_clone_name()
4852
4853 # create subvolume
4854 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
4855
4856 # do some IO
4857 self._do_subvolume_io(subvolume, number_of_files=64)
4858
4859 # snapshot subvolume
4860 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4861
4862 # now, protect snapshot
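# protect/unprotect are deprecated and should succeed as no-ops; the clone below must work regardless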
4863 self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)
4864
4865 # schedule a clone
4866 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
4867
4868 # check clone status
4869 self._wait_for_clone_to_complete(clone)
4870
4871 # now, unprotect snapshot
4872 self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)
4873
4874 # verify clone
4875 self._verify_clone(subvolume, snapshot, clone)
4876
4877 # remove snapshot
4878 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4879
4880 # remove subvolumes
4881 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4882 self._fs_cmd("subvolume", "rm", self.volname, clone)
4883
4884 # verify trash dir is clean
4885 self._wait_for_trash_empty()
4886
4887 def test_subvolume_snapshot_rm_force(self):
4888 # test removing a non-existent subvolume snapshot with --force
4889 subvolume = self._generate_random_subvolume_name()
4890 snapshot = self._generate_random_snapshot_name()
4891
4892 # remove snapshot
4893 try:
4894 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
4895 except CommandFailedError:
4896 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
4897
4898 def test_subvolume_snapshot_metadata_set(self):
4899 """
4900 Set custom metadata for subvolume snapshot.
4901 """
4902 subvolname = self._generate_random_subvolume_name()
4903 group = self._generate_random_group_name()
4904 snapshot = self._generate_random_snapshot_name()
4905
4906 # create group.
4907 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4908
4909 # create subvolume in group.
4910 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
4911
4912 # snapshot subvolume
4913 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
4914
4915 # set metadata for snapshot.
4916 key = "key"
4917 value = "value"
4918 try:
4919 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
4920 except CommandFailedError:
4921 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
4922
4923 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
4924 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4925 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4926
4927 # verify trash dir is clean.
4928 self._wait_for_trash_empty()
4929
4930 def test_subvolume_snapshot_metadata_set_idempotence(self):
4931 """
4932 Set custom metadata for subvolume snapshot (Idempotency).
4933 """
4934 subvolname = self._generate_random_subvolume_name()
4935 group = self._generate_random_group_name()
4936 snapshot = self._generate_random_snapshot_name()
4937
4938 # create group.
4939 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4940
4941 # create subvolume in group.
4942 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
4943
4944 # snapshot subvolume
4945 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
4946
4947 # set metadata for snapshot.
4948 key = "key"
4949 value = "value"
4950 try:
4951 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
4952 except CommandFailedError:
4953 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
4954
4955 # set the same metadata again for the snapshot.
4956 try:
4957 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
4958 except CommandFailedError:
4959 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is an idempotent operation")
4960
4961 # get value for specified key.
4962 try:
4963 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
4964 except CommandFailedError:
4965 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
4966
4967 # remove '\n' from returned value.
4968 ret = ret.strip('\n')
4969
4970 # match received value with expected value.
4971 self.assertEqual(value, ret)
4972
4973 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
4974 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4975 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4976
4977 # verify trash dir is clean.
4978 self._wait_for_trash_empty()
4979
4980 def test_subvolume_snapshot_metadata_get(self):
4981 """
4982 Get custom metadata for a specified key in subvolume snapshot metadata.
4983 """
4984 subvolname = self._generate_random_subvolume_name()
4985 group = self._generate_random_group_name()
4986 snapshot = self._generate_random_snapshot_name()
4987
4988 # create group.
4989 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4990
4991 # create subvolume in group.
4992 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
4993
4994 # snapshot subvolume
4995 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
4996
4997 # set metadata for snapshot.
4998 key = "key"
4999 value = "value"
5000 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5001
5002 # get value for specified key.
5003 try:
5004 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5005 except CommandFailedError:
5006 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5007
5008 # remove '\n' from returned value.
5009 ret = ret.strip('\n')
5010
5011 # match received value with expected value.
5012 self.assertEqual(value, ret)
5013
5014 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5015 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5016 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5017
5018 # verify trash dir is clean.
5019 self._wait_for_trash_empty()
5020
5021 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self):
5022 """
5023 Get custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
5024 """
5025 subvolname = self._generate_random_subvolume_name()
5026 group = self._generate_random_group_name()
5027 snapshot = self._generate_random_snapshot_name()
5028
5029 # create group.
5030 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5031
5032 # create subvolume in group.
5033 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5034
5035 # snapshot subvolume
5036 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5037
5038 # set metadata for snapshot.
5039 key = "key"
5040 value = "value"
5041 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5042
5043 # try to get value for nonexisting key
5044 # Expecting ENOENT exit status because key does not exist
5045 try:
5046 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key_nonexist", group)
5047 except CommandFailedError as e:
5048 self.assertEqual(e.exitstatus, errno.ENOENT)
5049 else:
5050 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
5051
5052 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5053 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5054 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5055
5056 # verify trash dir is clean.
5057 self._wait_for_trash_empty()
5058
5059 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self):
5060 """
5061 Get custom metadata for a subvolume snapshot when no metadata has been added for it.
5062 """
5063 subvolname = self._generate_random_subvolume_name()
5064 group = self._generate_random_group_name()
5065 snapshot = self._generate_random_snapshot_name()
5066
5067 # create group.
5068 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5069
5070 # create subvolume in group.
5071 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5072
5073 # snapshot subvolume
5074 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5075
5076 # try to get value for nonexisting key (as section does not exist)
5077 # Expecting ENOENT exit status because key does not exist
5078 try:
5079 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key", group)
5080 except CommandFailedError as e:
5081 self.assertEqual(e.exitstatus, errno.ENOENT)
5082 else:
5083 self.fail("Expected ENOENT because section does not exist")
5084
5085 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5086 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5087 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5088
5089 # verify trash dir is clean.
5090 self._wait_for_trash_empty()
5091
5092 def test_subvolume_snapshot_metadata_update(self):
5093 """
5094 Update custom metadata for a specified key in subvolume snapshot metadata.
5095 """
5096 subvolname = self._generate_random_subvolume_name()
5097 group = self._generate_random_group_name()
5098 snapshot = self._generate_random_snapshot_name()
5099
5100 # create group.
5101 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5102
5103 # create subvolume in group.
5104 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5105
5106 # snapshot subvolume
5107 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5108
5109 # set metadata for snapshot.
5110 key = "key"
5111 value = "value"
5112 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5113
5114 # update metadata against key.
5115 new_value = "new_value"
5116 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, new_value, group)
5117
5118 # get metadata for specified key of snapshot.
5119 try:
5120 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5121 except CommandFailedError:
5122 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5123
5124 # remove '\n' from returned value.
5125 ret = ret.strip('\n')
5126
5127 # match received value with expected value.
5128 self.assertEqual(new_value, ret)
5129
5130 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5131 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5132 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5133
5134 # verify trash dir is clean.
5135 self._wait_for_trash_empty()
5136
5137 def test_subvolume_snapshot_metadata_list(self):
5138 """
5139 List custom metadata for subvolume snapshot.
5140 """
5141 subvolname = self._generate_random_subvolume_name()
5142 group = self._generate_random_group_name()
5143 snapshot = self._generate_random_snapshot_name()
5144
5145 # create group.
5146 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5147
5148 # create subvolume in group.
5149 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5150
5151 # snapshot subvolume
5152 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5153
5154 # set metadata for snapshot.
5155 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
5156
5157 for k, v in input_metadata_dict.items():
5158 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, k, v, group)
5159
5160 # list metadata
5161 try:
5162 ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
5163 except CommandFailedError:
5164 self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5165
5166 # compare output with expected output
5167 self.assertDictEqual(input_metadata_dict, ret_dict)
5168
5169 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5170 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5171 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5172
5173 # verify trash dir is clean.
5174 self._wait_for_trash_empty()
5175
5176 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self):
5177 """
5178 List custom metadata for a subvolume snapshot when no metadata has been added for it.
5179 """
5180 subvolname = self._generate_random_subvolume_name()
5181 group = self._generate_random_group_name()
5182 snapshot = self._generate_random_snapshot_name()
5183
5184 # create group.
5185 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5186
5187 # create subvolume in group.
5188 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5189
5190 # snapshot subvolume
5191 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5192
5193 # list metadata
5194 try:
5195 ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
5196 except CommandFailedError:
5197 self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5198
5199 # compare output with expected output
5200 empty_dict = {}
5201 self.assertDictEqual(ret_dict, empty_dict)
5202
5203 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5204 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5205 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5206
5207 # verify trash dir is clean.
5208 self._wait_for_trash_empty()
5209
5210 def test_subvolume_snapshot_metadata_remove(self):
5211 """
5212 Remove custom metadata for a specified key in subvolume snapshot metadata.
5213 """
5214 subvolname = self._generate_random_subvolume_name()
5215 group = self._generate_random_group_name()
5216 snapshot = self._generate_random_snapshot_name()
5217
5218 # create group.
5219 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5220
5221 # create subvolume in group.
5222 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5223
5224 # snapshot subvolume
5225 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5226
5227 # set metadata for snapshot.
5228 key = "key"
5229 value = "value"
5230 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5231
5232 # remove metadata against specified key.
5233 try:
5234 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
5235 except CommandFailedError:
5236 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5237
5238 # confirm key is removed by again fetching metadata
5239 try:
5240 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5241 except CommandFailedError as e:
5242 self.assertEqual(e.exitstatus, errno.ENOENT)
5243 else:
5244 self.fail("Expected ENOENT because key does not exist")
5245
5246 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5247 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5248 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5249
5250 # verify trash dir is clean.
5251 self._wait_for_trash_empty()
5252
5253 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self):
5254 """
5255 Remove custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
5256 """
5257 subvolname = self._generate_random_subvolume_name()
5258 group = self._generate_random_group_name()
5259 snapshot = self._generate_random_snapshot_name()
5260
5261 # create group.
5262 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5263
5264 # create subvolume in group.
5265 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5266
5267 # snapshot subvolume
5268 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5269
5270 # set metadata for snapshot.
5271 key = "key"
5272 value = "value"
5273 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5274
5275 # try to remove value for nonexisting key
5276 # Expecting ENOENT exit status because key does not exist
5277 try:
5278 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key_nonexist", group)
5279 except CommandFailedError as e:
5280 self.assertEqual(e.exitstatus, errno.ENOENT)
5281 else:
5282 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
5283
5284 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5285 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5286 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5287
5288 # verify trash dir is clean.
5289 self._wait_for_trash_empty()
5290
5291 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self):
5292 """
5293 Remove custom metadata for a subvolume snapshot when no metadata has been added for it.
5294 """
5295 subvolname = self._generate_random_subvolume_name()
5296 group = self._generate_random_group_name()
5297 snapshot = self._generate_random_snapshot_name()
5298
5299 # create group.
5300 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5301
5302 # create subvolume in group.
5303 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5304
5305 # snapshot subvolume
5306 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5307
5308 # try to remove value for nonexisting key (as section does not exist)
5309 # Expecting ENOENT exit status because key does not exist
5310 try:
5311 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key", group)
5312 except CommandFailedError as e:
5313 self.assertEqual(e.exitstatus, errno.ENOENT)
5314 else:
5315 self.fail("Expected ENOENT because section does not exist")
5316
5317 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5318 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5319 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5320
5321 # verify trash dir is clean.
5322 self._wait_for_trash_empty()
5323
5324 def test_subvolume_snapshot_metadata_remove_force(self):
5325 """
5326 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
5327 """
5328 subvolname = self._generate_random_subvolume_name()
5329 group = self._generate_random_group_name()
5330 snapshot = self._generate_random_snapshot_name()
5331
5332 # create group.
5333 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5334
5335 # create subvolume in group.
5336 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5337
5338 # snapshot subvolume
5339 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5340
5341 # set metadata for snapshot.
5342 key = "key"
5343 value = "value"
5344 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5345
5346 # remove metadata against specified key with --force option.
5347 try:
5348 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
5349 except CommandFailedError:
5350 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5351
5352 # confirm key is removed by again fetching metadata
5353 try:
5354 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5355 except CommandFailedError as e:
5356 self.assertEqual(e.exitstatus, errno.ENOENT)
5357 else:
5358 self.fail("Expected ENOENT because key does not exist")
5359
5360 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5361 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5362 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5363
5364 # verify trash dir is clean.
5365 self._wait_for_trash_empty()
5366
5367 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self):
5368 """
5369 Forcefully remove custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
5370 """
5371 subvolname = self._generate_random_subvolume_name()
5372 group = self._generate_random_group_name()
5373 snapshot = self._generate_random_snapshot_name()
5374
5375 # create group.
5376 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5377
5378 # create subvolume in group.
5379 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5380
5381 # snapshot subvolume
5382 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5383
5384 # set metadata for snapshot.
5385 key = "key"
5386 value = "value"
5387 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5388
5389 # remove metadata against specified key.
5390 try:
5391 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
5392 except CommandFailedError:
5393 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5394
5395 # confirm key is removed by again fetching metadata
5396 try:
5397 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5398 except CommandFailedError as e:
5399 self.assertEqual(e.exitstatus, errno.ENOENT)
5400 else:
5401 self.fail("Expected ENOENT because key does not exist")
5402
5403 # again remove metadata against already removed key with --force option.
5404 try:
5405 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
5406 except CommandFailedError:
5407 self.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
5408
5409 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5410 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5411 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5412
5413 # verify trash dir is clean.
5414 self._wait_for_trash_empty()
5415
5416 def test_subvolume_snapshot_metadata_after_snapshot_remove(self):
5417 """
5418 Verify metadata removal of subvolume snapshot after snapshot removal.
5419 """
5420 subvolname = self._generate_random_subvolume_name()
5421 group = self._generate_random_group_name()
5422 snapshot = self._generate_random_snapshot_name()
5423
5424 # create group.
5425 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5426
5427 # create subvolume in group.
5428 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5429
5430 # snapshot subvolume
5431 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5432
5433 # set metadata for snapshot.
5434 key = "key"
5435 value = "value"
5436 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5437
5438 # get value for specified key.
5439 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5440
5441 # remove '\n' from returned value.
5442 ret = ret.strip('\n')
5443
5444 # match received value with expected value.
5445 self.assertEqual(value, ret)
5446
5447 # remove subvolume snapshot.
5448 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5449
5450 # try to get metadata after removing snapshot.
5451 # Expecting ENOENT with an error message saying the snapshot does not exist
5452 cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
5453 args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
5454 check_status=False, stdout=StringIO(), stderr=StringIO())
5455 self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
5456 self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
5457 f"Expecting message: snapshot '{snapshot}' does not exist ")
5458
5459 # confirm metadata is removed by searching section name in .meta file
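# snapshot metadata is persisted in the subvolume's .meta config file under a 'SNAP_METADATA_<snapshot>' section; removing the snapshot should drop the entire section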
5460 meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
5461 section_name = "SNAP_METADATA_" + snapshot
5462
5463 try:
5464 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5465 except CommandFailedError as e:
5466 self.assertNotEqual(e.exitstatus, 0)
5467 else:
5468 self.fail("Expected a non-zero exit status because the section should not exist")
5469
5470 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5471 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5472
5473 # verify trash dir is clean.
5474 self._wait_for_trash_empty()
5475
5476 def test_clean_stale_subvolume_snapshot_metadata(self):
5477 """
5478 Validate cleaning of stale subvolume snapshot metadata.
5479 """
5480 subvolname = self._generate_random_subvolume_name()
5481 group = self._generate_random_group_name()
5482 snapshot = self._generate_random_snapshot_name()
5483
5484 # create group.
5485 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5486
5487 # create subvolume in group.
5488 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5489
5490 # snapshot subvolume
5491 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5492
5493 # set metadata for snapshot.
5494 key = "key"
5495 value = "value"
5496 try:
5497 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5498 except CommandFailedError:
5499 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5500
5501 # save the subvolume config file.
5502 meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
5503 tmp_meta_path = os.path.join(".", "volumes", group, subvolname, ".meta.stale_snap_section")
5504 self.mount_a.run_shell(['sudo', 'cp', '-p', meta_path, tmp_meta_path], omit_sudo=False)
5505
5506 # Delete snapshot, this would remove user snap metadata
5507 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5508
5509 # Copy back saved subvolume config file. This would have stale snapshot metadata
5510 self.mount_a.run_shell(['sudo', 'cp', '-p', tmp_meta_path, meta_path], omit_sudo=False)
5511
5512 # Verify that it has stale snapshot metadata
5513 section_name = "SNAP_METADATA_" + snapshot
5514 try:
5515 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5516 except CommandFailedError:
5517 self.fail("Expected the grep command to succeed because stale snapshot metadata exists")
5518
5519 # Do any subvolume operation to clean the stale snapshot metadata
5520 _ = json.loads(self._get_subvolume_info(self.volname, subvolname, group))
5521
5522 # Verify that the stale snapshot metadata is cleaned
5523 try:
5524 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5525 except CommandFailedError as e:
5526 self.assertNotEqual(e.exitstatus, 0)
5527 else:
5528 self.fail("Expected non-zero exist status because stale snapshot metadata should not exist")
5529
5530 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5531 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5532
5533 # verify trash dir is clean.
5534 self._wait_for_trash_empty()
5535 # Clean tmp config file
5536 self.mount_a.run_shell(['sudo', 'rm', '-f', tmp_meta_path], omit_sudo=False)
5537
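# A minimal helper sketch (an addition, not part of the original suite):
# rather than grep'ing the .meta file for a section name as above, the
# INI-style config can be parsed and the section's absence asserted
# directly. The helper name and its use of 'sudo cat' over the client
# mount are assumptions for illustration.
def _assert_meta_section_absent(self, meta_path, section_name):
    import configparser
    # read the subvolume config file through the client mount
    conf_data = self.mount_a.run_shell(
        ['sudo', 'cat', meta_path], omit_sudo=False,
        stdout=StringIO()).stdout.getvalue()
    parser = configparser.ConfigParser()
    parser.read_string(conf_data)
    # the (stale) snapshot metadata section must not be present
    self.assertNotIn(section_name, parser.sections())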
5538
5539 class TestSubvolumeSnapshotClones(TestVolumesHelper):
5540 """ Tests for FS subvolume snapshot clone operations."""
5541 def test_clone_subvolume_info(self):
5542 # tests the 'fs subvolume info' command for a clone
5543 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
5544 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
5545 "type", "uid"]
5546
5547 subvolume = self._generate_random_subvolume_name()
5548 snapshot = self._generate_random_snapshot_name()
5549 clone = self._generate_random_clone_name()
5550
5551 # create subvolume
5552 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5553
5554 # do some IO
5555 self._do_subvolume_io(subvolume, number_of_files=1)
5556
5557 # snapshot subvolume
5558 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5559
5560 # schedule a clone
5561 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5562
5563 # check clone status
5564 self._wait_for_clone_to_complete(clone)
5565
5566 # remove snapshot
5567 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5568
5569 subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
5570 if len(subvol_info) == 0:
5571 raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
5572 for md in subvol_md:
5573 if md not in subvol_info:
5574 raise RuntimeError("%s not present in the metadata of subvolume" % md)
5575 if subvol_info["type"] != "clone":
5576 raise RuntimeError("type should be set to clone")
5577
5578 # remove subvolumes
5579 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5580 self._fs_cmd("subvolume", "rm", self.volname, clone)
5581
5582 # verify trash dir is clean
5583 self._wait_for_trash_empty()
5584
5585 def test_subvolume_snapshot_info_without_snapshot_clone(self):
5586 """
5587 Verify subvolume snapshot info output without cloning the snapshot.
5588 If no clone is performed, the path /volumes/_index/clone/{track_id}
5589 will not exist.
5590 """
5591 subvolume = self._generate_random_subvolume_name()
5592 snapshot = self._generate_random_snapshot_name()
5593
5594 # create subvolume.
5595 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5596
5597 # snapshot subvolume
5598 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5599
5600 # list snapshot info
5601 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5602
5603 # verify snapshot info
5604 self.assertEqual(result['has_pending_clones'], "no")
5605 self.assertNotIn('orphan_clones_count', result)
5606 self.assertNotIn('pending_clones', result)
5607
5608 # remove snapshot, subvolume, clone
5609 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5610 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5611
5612 # verify trash dir is clean
5613 self._wait_for_trash_empty()
5614
5615 def test_subvolume_snapshot_info_if_no_clone_pending(self):
5616 """
5617 Verify subvolume snapshot info output if no clone is in pending state.
5618 """
5619 subvolume = self._generate_random_subvolume_name()
5620 snapshot = self._generate_random_snapshot_name()
5621 clone_list = [f'clone_{i}' for i in range(3)]
5622
5623 # create subvolume.
5624 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5625
5626 # snapshot subvolume
5627 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5628
5629 # schedule clones
5630 for clone in clone_list:
5631 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5632
5633 # check clones status
5634 for clone in clone_list:
5635 self._wait_for_clone_to_complete(clone)
5636
5637 # list snapshot info
5638 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5639
5640 # verify snapshot info
5641 self.assertEqual(result['has_pending_clones'], "no")
5642 self.assertNotIn('orphan_clones_count', result)
5643 self.assertNotIn('pending_clones', result)
5644
5645 # remove snapshot, subvolume, clone
5646 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5647 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5648 for clone in clone_list:
5649 self._fs_cmd("subvolume", "rm", self.volname, clone)
5650
5651 # verify trash dir is clean
5652 self._wait_for_trash_empty()
5653
5654 def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self):
5655 """
5656 Verify subvolume snapshot info output if clones are in pending state.
5657 Clones are not created in a particular target_group; hence target_group
5658 should not appear in the output, as we don't show _nogroup (the default group).
5659 """
5660 subvolume = self._generate_random_subvolume_name()
5661 snapshot = self._generate_random_snapshot_name()
5662 clone_list = [f'clone_{i}' for i in range(3)]
5663
5664 # create subvolume.
5665 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5666
5667 # snapshot subvolume
5668 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5669
5670 # insert delay at the beginning of snapshot clone
5671 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
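# (note: 'snapshot_clone_delay' makes the cloner threads wait the given
#  number of seconds before starting to copy, so scheduled clones stay
#  'pending' long enough for the checks below to observe them)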
5672
5673 # schedule clones
5674 for clone in clone_list:
5675 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5676
5677 # list snapshot info
5678 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5679
5680 # verify snapshot info
5681 expected_clone_list = [{"name": clone} for clone in clone_list]
5684 self.assertEqual(result['has_pending_clones'], "yes")
5685 self.assertNotIn('orphan_clones_count', result)
5686 self.assertListEqual(result['pending_clones'], expected_clone_list)
5687 self.assertEqual(len(result['pending_clones']), 3)
5688
5689 # check clones status
5690 for clone in clone_list:
5691 self._wait_for_clone_to_complete(clone)
5692
5693 # remove snapshot, subvolume, clone
5694 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5695 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5696 for clone in clone_list:
5697 self._fs_cmd("subvolume", "rm", self.volname, clone)
5698
5699 # verify trash dir is clean
5700 self._wait_for_trash_empty()
5701
5702 def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self):
5703 """
5704 Verify subvolume snapshot info output if clones are in pending state.
5705 Clones are created with a specific target_group.
5706 """
5707 subvolume = self._generate_random_subvolume_name()
5708 snapshot = self._generate_random_snapshot_name()
5709 clone = self._generate_random_clone_name()
5710 group = self._generate_random_group_name()
5711 target_group = self._generate_random_group_name()
5712
5713 # create groups
5714 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5715 self._fs_cmd("subvolumegroup", "create", self.volname, target_group)
5716
5717 # create subvolume
5718 self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")
5719
5720 # snapshot subvolume
5721 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
5722
5723 # insert delay at the beginning of snapshot clone
5724 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5725
5726 # schedule a clone
5727 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
5728 "--group_name", group, "--target_group_name", target_group)
5729
5730 # list snapshot info
5731 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot, "--group_name", group))
5732
5733 # verify snapshot info
5734 expected_clone_list = [{"name": clone, "target_group": target_group}]
5735 self.assertEqual(result['has_pending_clones'], "yes")
5736 self.assertNotIn('orphan_clones_count', result)
5737 self.assertListEqual(result['pending_clones'], expected_clone_list)
5738 self.assertEqual(len(result['pending_clones']), 1)
5739
5740 # check clone status
5741 self._wait_for_clone_to_complete(clone, clone_group=target_group)
5742
5743 # remove snapshot
5744 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
5745
5746 # remove subvolumes
5747 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
5748 self._fs_cmd("subvolume", "rm", self.volname, clone, target_group)
5749
5750 # remove groups
5751 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5752 self._fs_cmd("subvolumegroup", "rm", self.volname, target_group)
5753
5754 # verify trash dir is clean
5755 self._wait_for_trash_empty()
5756
5757 def test_subvolume_snapshot_info_if_orphan_clone(self):
5758 """
5759 Verify subvolume snapshot info output if orphan clones exist.
5760 Orphan clones should not be listed under pending clones.
5761 orphan_clones_count should display the correct count of orphan clones.
5762 """
5763 subvolume = self._generate_random_subvolume_name()
5764 snapshot = self._generate_random_snapshot_name()
5765 clone_list = [f'clone_{i}' for i in range(3)]
5766
5767 # create subvolume.
5768 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5769
5770 # snapshot subvolume
5771 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5772
5773 # insert delay at the beginning of snapshot clone
5774 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5775
5776 # schedule clones
5777 for clone in clone_list:
5778 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5779
5780 # remove the track file for the third clone to make it an orphan
5781 meta_path = os.path.join(".", "volumes", "_nogroup", subvolume, ".meta")
5782 pending_clones_result = self.mount_a.run_shell(f"sudo grep \"clone snaps\" -A3 {meta_path}", omit_sudo=False, stdout=StringIO(), stderr=StringIO())
5783 third_clone_track_id = pending_clones_result.stdout.getvalue().splitlines()[3].split(" = ")[0]
5784 third_clone_track_path = os.path.join(".", "volumes", "_index", "clone", third_clone_track_id)
5785 self.mount_a.run_shell(f"sudo rm -f {third_clone_track_path}", omit_sudo=False)
5786
5787 # list snapshot info
5788 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5789
5790 # verify snapshot info
5791 expected_clone_list = [{"name": c} for c in clone_list[:-1]]
5794 self.assertEqual(result['has_pending_clones'], "yes")
5795 self.assertEqual(result['orphan_clones_count'], 1)
5796 self.assertListEqual(result['pending_clones'], expected_clone_list)
5797 self.assertEqual(len(result['pending_clones']), 2)
5798
5799 # check clones status
5800 for c in clone_list[:-1]:
5801 self._wait_for_clone_to_complete(c)
5802
5803 # list snapshot info after cloning completion
5804 res = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5805
5806 # verify snapshot info (has_pending_clones should be no)
5807 self.assertEqual(res['has_pending_clones'], "no")
5808
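# A hypothetical helper (a sketch, assuming .meta is an INI-style file
# whose 'clone snaps' section maps track-id -> clone name): parsing the
# config is more robust than slicing 'grep -A3' output as done above.
def _get_clone_track_ids(self, subvolume, group="_nogroup"):
    import configparser
    meta_path = os.path.join(".", "volumes", group, subvolume, ".meta")
    conf_data = self.mount_a.run_shell(
        ['sudo', 'cat', meta_path], omit_sudo=False,
        stdout=StringIO()).stdout.getvalue()
    parser = configparser.ConfigParser()
    parser.read_string(conf_data)
    # each key in the 'clone snaps' section is one clone's track id
    if not parser.has_section('clone snaps'):
        return []
    return list(parser['clone snaps'].keys())
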
5809 def test_non_clone_status(self):
5810 subvolume = self._generate_random_subvolume_name()
5811
5812 # create subvolume
5813 self._fs_cmd("subvolume", "create", self.volname, subvolume)
5814
5815 try:
5816 self._fs_cmd("clone", "status", self.volname, subvolume)
5817 except CommandFailedError as ce:
5818 if ce.exitstatus != errno.ENOTSUP:
5819 raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
5820 else:
5821 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
5822
5823 # remove subvolume
5824 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5825
5826 # verify trash dir is clean
5827 self._wait_for_trash_empty()
5828
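# The try/except/else pattern above recurs throughout this class; a
# hypothetical wrapper (a sketch, not part of the suite) could express
# the same assertion in one call, e.g.
#   self._assert_fs_cmd_fails_with(errno.ENOTSUP, "clone", "status",
#                                  self.volname, subvolume)
def _assert_fs_cmd_fails_with(self, expected_errno, *args):
    try:
        self._fs_cmd(*args)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, expected_errno,
                         "invalid error code {0}, expected {1}".format(
                             ce.exitstatus, expected_errno))
    else:
        self.fail("expected 'fs {0}' to fail".format(" ".join(args)))
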
5829 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
5830 subvolume = self._generate_random_subvolume_name()
5831 snapshot = self._generate_random_snapshot_name()
5832 clone = self._generate_random_clone_name()
5833 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
5834
5835 # create subvolume, in an isolated namespace with a specified size
5836 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")
5837
5838 # do some IO
5839 self._do_subvolume_io(subvolume, number_of_files=8)
5840
5841 # snapshot subvolume
5842 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5843
5844 # create a pool different from current subvolume pool
5845 subvol_path = self._get_subvolume_path(self.volname, subvolume)
5846 default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
5847 new_pool = "new_pool"
5848 self.assertNotEqual(default_pool, new_pool)
5849 self.fs.add_data_pool(new_pool)
5850
5851 # update source subvolume pool
5852 self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")
5853
5854 # schedule a clone, with NO --pool specification
5855 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5856
5857 # check clone status
5858 self._wait_for_clone_to_complete(clone)
5859
5860 # verify clone
5861 self._verify_clone(subvolume, snapshot, clone)
5862
5863 # remove snapshot
5864 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5865
5866 # remove subvolumes
5867 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5868 self._fs_cmd("subvolume", "rm", self.volname, clone)
5869
5870 # verify trash dir is clean
5871 self._wait_for_trash_empty()
5872
5873 def test_subvolume_clone_inherit_quota_attrs(self):
5874 subvolume = self._generate_random_subvolume_name()
5875 snapshot = self._generate_random_snapshot_name()
5876 clone = self._generate_random_clone_name()
5877 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
5878
5879 # create subvolume with a specified size
5880 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))
5881
5882 # do some IO
5883 self._do_subvolume_io(subvolume, number_of_files=8)
5884
5885 # get subvolume path
5886 subvolpath = self._get_subvolume_path(self.volname, subvolume)
5887
5888 # set quota on number of files
5889 self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)
5890
5891 # snapshot subvolume
5892 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5893
5894 # schedule a clone
5895 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5896
5897 # check clone status
5898 self._wait_for_clone_to_complete(clone)
5899
5900 # verify clone
5901 self._verify_clone(subvolume, snapshot, clone)
5902
5903 # get subvolume path
5904 clonepath = self._get_subvolume_path(self.volname, clone)
5905
5906 # verify quota max_files is inherited from source snapshot
5907 subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
5908 clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
5909 self.assertEqual(subvol_quota, clone_quota)
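# a similar check could cover the byte quota inherited via '--size';
# sketched below as an assumption (the original test asserts only max_files):
# subvol_bytes = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
# clone_bytes = self.mount_a.getfattr(clonepath, "ceph.quota.max_bytes")
# self.assertEqual(subvol_bytes, clone_bytes)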
5910
5911 # remove snapshot
5912 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5913
5914 # remove subvolumes
5915 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5916 self._fs_cmd("subvolume", "rm", self.volname, clone)
5917
5918 # verify trash dir is clean
5919 self._wait_for_trash_empty()
5920
5921 def test_subvolume_clone_in_progress_getpath(self):
5922 subvolume = self._generate_random_subvolume_name()
5923 snapshot = self._generate_random_snapshot_name()
5924 clone = self._generate_random_clone_name()
5925
5926 # create subvolume
5927 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5928
5929 # do some IO
5930 self._do_subvolume_io(subvolume, number_of_files=64)
5931
5932 # snapshot subvolume
5933 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5934
5935 # Insert delay at the beginning of snapshot clone
5936 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5937
5938 # schedule a clone
5939 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5940
5941 # clone should not be accessible right now
5942 try:
5943 self._get_subvolume_path(self.volname, clone)
5944 except CommandFailedError as ce:
5945 if ce.exitstatus != errno.EAGAIN:
5946 raise RuntimeError("invalid error code when fetching path of an pending clone")
5947 else:
5948 raise RuntimeError("expected fetching path of an pending clone to fail")
5949
5950 # check clone status
5951 self._wait_for_clone_to_complete(clone)
5952
5953 # clone should be accessible now
5954 subvolpath = self._get_subvolume_path(self.volname, clone)
5955 self.assertNotEqual(subvolpath, None)
5956
5957 # verify clone
5958 self._verify_clone(subvolume, snapshot, clone)
5959
5960 # remove snapshot
5961 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5962
5963 # remove subvolumes
5964 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5965 self._fs_cmd("subvolume", "rm", self.volname, clone)
5966
5967 # verify trash dir is clean
5968 self._wait_for_trash_empty()
5969
5970 def test_subvolume_clone_in_progress_snapshot_rm(self):
5971 subvolume = self._generate_random_subvolume_name()
5972 snapshot = self._generate_random_snapshot_name()
5973 clone = self._generate_random_clone_name()
5974
5975 # create subvolume
5976 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5977
5978 # do some IO
5979 self._do_subvolume_io(subvolume, number_of_files=64)
5980
5981 # snapshot subvolume
5982 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5983
5984 # Insert delay at the beginning of snapshot clone
5985 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5986
5987 # schedule a clone
5988 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5989
5990 # snapshot should not be deletable now
5991 try:
5992 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5993 except CommandFailedError as ce:
5994 self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
5995 else:
5996 self.fail("expected removing source snapshot of a clone to fail")
5997
5998 # check clone status
5999 self._wait_for_clone_to_complete(clone)
6000
6001 # clone should be accessible now
6002 subvolpath = self._get_subvolume_path(self.volname, clone)
6003 self.assertNotEqual(subvolpath, None)
6004
6005 # verify clone
6006 self._verify_clone(subvolume, snapshot, clone)
6007
6008 # remove snapshot
6009 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6010
6011 # remove subvolumes
6012 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6013 self._fs_cmd("subvolume", "rm", self.volname, clone)
6014
6015 # verify trash dir is clean
6016 self._wait_for_trash_empty()
6017
6018 def test_subvolume_clone_in_progress_source(self):
6019 subvolume = self._generate_random_subvolume_name()
6020 snapshot = self._generate_random_snapshot_name()
6021 clone = self._generate_random_clone_name()
6022
6023 # create subvolume
6024 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6025
6026 # do some IO
6027 self._do_subvolume_io(subvolume, number_of_files=64)
6028
6029 # snapshot subvolume
6030 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6031
6032 # Insert delay at the beginning of snapshot clone
6033 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6034
6035 # schedule a clone
6036 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6037
6038 # verify clone source
6039 result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
6040 source = result['status']['source']
6041 self.assertEqual(source['volume'], self.volname)
6042 self.assertEqual(source['subvolume'], subvolume)
6043 self.assertIsNone(source.get('group'))
6044 self.assertEqual(source['snapshot'], snapshot)
6045
6046 # check clone status
6047 self._wait_for_clone_to_complete(clone)
6048
6049 # clone should be accessible now
6050 subvolpath = self._get_subvolume_path(self.volname, clone)
6051 self.assertNotEqual(subvolpath, None)
6052
6053 # verify clone
6054 self._verify_clone(subvolume, snapshot, clone)
6055
6056 # remove snapshot
6057 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6058
6059 # remove subvolumes
6060 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6061 self._fs_cmd("subvolume", "rm", self.volname, clone)
6062
6063 # verify trash dir is clean
6064 self._wait_for_trash_empty()
6065
6066 def test_subvolume_clone_retain_snapshot_with_snapshots(self):
6067 """
6068 retain snapshots of a cloned subvolume and check disallowed operations
6069 """
6070 subvolume = self._generate_random_subvolume_name()
6071 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
6072 clone = self._generate_random_clone_name()
6073
6074 # create subvolume
6075 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6076
6077 # store path for clone verification
6078 subvol1_path = self._get_subvolume_path(self.volname, subvolume)
6079
6080 # do some IO
6081 self._do_subvolume_io(subvolume, number_of_files=16)
6082
6083 # snapshot subvolume
6084 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
6085
6086 # remove with snapshot retention
6087 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6088
6089 # clone retained subvolume snapshot
6090 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)
6091
6092 # check clone status
6093 self._wait_for_clone_to_complete(clone)
6094
6095 # verify clone
6096 self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)
6097
6098 # create a snapshot on the clone
6099 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)
6100
6101 # remove the clone, retaining its snapshots
6102 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
6103
6104 # list snapshots
6105 clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
6106 self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
6107 " created subvolume snapshots")
6108 snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
6109 for snap in [snapshot2]:
6110 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
6111
6112 ## check disallowed operations on retained clone
6113 # clone-status
6114 try:
6115 self._fs_cmd("clone", "status", self.volname, clone)
6116 except CommandFailedError as ce:
6117 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
6118 else:
6119 self.fail("expected clone status of clone with retained snapshots to fail")
6120
6121 # clone-cancel
6122 try:
6123 self._fs_cmd("clone", "cancel", self.volname, clone)
6124 except CommandFailedError as ce:
6125 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
6126 else:
6127 self.fail("expected clone cancel of clone with retained snapshots to fail")
6128
6129 # remove snapshots (removes subvolumes as all are in retained state)
6130 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
6131 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)
6132
6133 # verify list subvolumes returns an empty list
6134 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6135 self.assertEqual(len(subvolumels), 0)
6136
6137 # verify trash dir is clean
6138 self._wait_for_trash_empty()
6139
6140 def test_subvolume_retain_snapshot_clone(self):
6141 """
6142 clone a snapshot from a snapshot retained subvolume
6143 """
6144 subvolume = self._generate_random_subvolume_name()
6145 snapshot = self._generate_random_snapshot_name()
6146 clone = self._generate_random_clone_name()
6147
6148 # create subvolume
6149 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6150
6151 # store path for clone verification
6152 subvol_path = self._get_subvolume_path(self.volname, subvolume)
6153
6154 # do some IO
6155 self._do_subvolume_io(subvolume, number_of_files=16)
6156
6157 # snapshot subvolume
6158 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6159
6160 # remove with snapshot retention
6161 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6162
6163 # clone retained subvolume snapshot
6164 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6165
6166 # check clone status
6167 self._wait_for_clone_to_complete(clone)
6168
6169 # verify clone
6170 self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)
6171
6172 # remove snapshots (removes retained volume)
6173 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6174
6175 # remove subvolume
6176 self._fs_cmd("subvolume", "rm", self.volname, clone)
6177
6178 # verify list subvolumes returns an empty list
6179 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6180 self.assertEqual(len(subvolumels), 0)
6181
6182 # verify trash dir is clean
6183 self._wait_for_trash_empty()
6184
6185 def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
6186 """
6187 clone a subvolume from a recreated subvolume's latest snapshot
6188 """
6189 subvolume = self._generate_random_subvolume_name()
6190 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
6191 clone = self._generate_random_clone_name()
6192
6193 # create subvolume
6194 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6195
6196 # do some IO
6197 self._do_subvolume_io(subvolume, number_of_files=16)
6198
6199 # snapshot subvolume
6200 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
6201
6202 # remove with snapshot retention
6203 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6204
6205 # recreate subvolume
6206 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6207
6208 # get and store path for clone verification
6209 subvol2_path = self._get_subvolume_path(self.volname, subvolume)
6210
6211 # do some IO
6212 self._do_subvolume_io(subvolume, number_of_files=16)
6213
6214 # snapshot newer subvolume
6215 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
6216
6217 # remove with snapshot retention
6218 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6219
6220 # clone retained subvolume's newer snapshot
6221 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)
6222
6223 # check clone status
6224 self._wait_for_clone_to_complete(clone)
6225
6226 # verify clone
6227 self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)
6228
6229 # remove snapshots
6230 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
6231 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
6232
6233 # remove subvolume
6234 self._fs_cmd("subvolume", "rm", self.volname, clone)
6235
6236 # verify list subvolumes returns an empty list
6237 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6238 self.assertEqual(len(subvolumels), 0)
6239
6240 # verify trash dir is clean
6241 self._wait_for_trash_empty()
6242
6243 def test_subvolume_retain_snapshot_recreate(self):
6244 """
6245 recreate a subvolume from one of its retained snapshots
6246 """
6247 subvolume = self._generate_random_subvolume_name()
6248 snapshot = self._generate_random_snapshot_name()
6249
6250 # create subvolume
6251 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6252
6253 # store path for clone verification
6254 subvol_path = self._get_subvolume_path(self.volname, subvolume)
6255
6256 # do some IO
6257 self._do_subvolume_io(subvolume, number_of_files=16)
6258
6259 # snapshot subvolume
6260 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6261
6262 # remove with snapshot retention
6263 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6264
6265 # recreate retained subvolume using its own snapshot to clone
6266 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
6267
6268 # check clone status
6269 self._wait_for_clone_to_complete(subvolume)
6270
6271 # verify clone
6272 self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)
6273
6274 # remove snapshot
6275 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6276
6277 # remove subvolume
6278 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6279
6280 # verify list subvolumes returns an empty list
6281 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6282 self.assertEqual(len(subvolumels), 0)
6283
6284 # verify trash dir is clean
6285 self._wait_for_trash_empty()
6286
6287 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
6288 """
6289 ensure retained clone recreate fails if its trash is not yet purged
6290 """
6291 subvolume = self._generate_random_subvolume_name()
6292 snapshot = self._generate_random_snapshot_name()
6293 clone = self._generate_random_clone_name()
6294
6295 # create subvolume
6296 self._fs_cmd("subvolume", "create", self.volname, subvolume)
6297
6298 # snapshot subvolume
6299 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6300
6301 # clone subvolume snapshot
6302 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6303
6304 # check clone status
6305 self._wait_for_clone_to_complete(clone)
6306
6307 # snapshot clone
6308 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
6309
6310 # remove clone with snapshot retention
6311 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
6312
6313 # fake a trash entry
6314 self._update_fake_trash(clone)
6315
6316 # clone subvolume snapshot (recreate)
6317 try:
6318 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6319 except CommandFailedError as ce:
6320 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
6321 else:
6322 self.fail("expected recreate of clone with purge pending to fail")
6323
6324 # clear fake trash entry
6325 self._update_fake_trash(clone, create=False)
6326
6327 # recreate subvolume
6328 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6329
6330 # check clone status
6331 self._wait_for_clone_to_complete(clone)
6332
6333 # remove snapshot
6334 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6335 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
6336
6337 # remove subvolume
6338 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6339 self._fs_cmd("subvolume", "rm", self.volname, clone)
6340
6341 # verify trash dir is clean
6342 self._wait_for_trash_empty()
6343
6344 def test_subvolume_snapshot_attr_clone(self):
6345 subvolume = self._generate_random_subvolume_name()
6346 snapshot = self._generate_random_snapshot_name()
6347 clone = self._generate_random_clone_name()
6348
6349 # create subvolume
6350 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6351
6352 # do some IO
6353 self._do_subvolume_io_mixed(subvolume)
6354
6355 # snapshot subvolume
6356 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6357
6358 # schedule a clone
6359 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6360
6361 # check clone status
6362 self._wait_for_clone_to_complete(clone)
6363
6364 # verify clone
6365 self._verify_clone(subvolume, snapshot, clone)
6366
6367 # remove snapshot
6368 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6369
6370 # remove subvolumes
6371 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6372 self._fs_cmd("subvolume", "rm", self.volname, clone)
6373
6374 # verify trash dir is clean
6375 self._wait_for_trash_empty()
6376
6377 def test_clone_failure_status_pending_in_progress_complete(self):
6378 """
6379 ensure failure status is not shown when clone is not in failed/cancelled state
6380 """
6381 subvolume = self._generate_random_subvolume_name()
6382 snapshot = self._generate_random_snapshot_name()
6383 clone1 = self._generate_random_clone_name()
6384
6385 # create subvolume
6386 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6387
6388 # do some IO
6389 self._do_subvolume_io(subvolume, number_of_files=200)
6390
6391 # snapshot subvolume
6392 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6393
6394 # Insert delay at the beginning of snapshot clone
6395 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6396
6397 # schedule clone1
6398 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6399
6400 # pending clone shouldn't show failure status
6401 clone1_result = self._get_clone_status(clone1)
6402 try:
6403 clone1_result["status"]["failure"]["errno"]
6404 except KeyError as e:
6405 self.assertEqual(str(e), "'failure'")
6406 else:
6407 self.fail("clone status shouldn't show failure for pending clone")
6408
6409 # check clone1 to be in-progress
6410 self._wait_for_clone_to_be_in_progress(clone1)
6411
6412 # in-progress clone1 shouldn't show failure status
6413 clone1_result = self._get_clone_status(clone1)
6414 try:
6415 clone1_result["status"]["failure"]["errno"]
6416 except KeyError as e:
6417 self.assertEqual(str(e), "'failure'")
6418 else:
6419 self.fail("clone status shouldn't show failure for in-progress clone")
6420
6421 # wait for clone1 to complete
6422 self._wait_for_clone_to_complete(clone1)
6423
6424 # complete clone1 shouldn't show failure status
6425 clone1_result = self._get_clone_status(clone1)
6426 try:
6427 clone1_result["status"]["failure"]["errno"]
6428 except KeyError as e:
6429 self.assertEqual(str(e), "'failure'")
6430 else:
6431 self.fail("clone status shouldn't show failure for complete clone")
6432
6433 # remove snapshot
6434 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6435
6436 # remove subvolumes
6437 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6438 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6439
6440 # verify trash dir is clean
6441 self._wait_for_trash_empty()
6442
6443 def test_clone_failure_status_failed(self):
6444 """
6445 ensure failure status is shown when clone is in failed state and validate the reason
6446 """
6447 subvolume = self._generate_random_subvolume_name()
6448 snapshot = self._generate_random_snapshot_name()
6449 clone1 = self._generate_random_clone_name()
6450
6451 # create subvolume
6452 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6453
6454 # do some IO
6455 self._do_subvolume_io(subvolume, number_of_files=200)
6456
6457 # snapshot subvolume
6458 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6459
6460 # Insert delay at the beginning of snapshot clone
6461 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6462
6463 # schedule clone1
6464 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6465
6466 # remove snapshot from backend to force the clone failure.
6467 snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
6468 self.mount_a.run_shell(['rmdir', snappath], sudo=True)
6469
6470 # wait for clone1 to fail.
6471 self._wait_for_clone_to_fail(clone1)
6472
6473 # check clone1 status
6474 clone1_result = self._get_clone_status(clone1)
6475 self.assertEqual(clone1_result["status"]["state"], "failed")
6476 self.assertEqual(clone1_result["status"]["failure"]["errno"], "2")
6477 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot))
6478
6479 # clone removal should succeed after failure, remove clone1
6480 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6481
6482 # remove subvolumes
6483 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6484
6485 # verify trash dir is clean
6486 self._wait_for_trash_empty()
6487
6488 def test_clone_failure_status_pending_cancelled(self):
6489 """
6490 ensure failure status is shown when clone is cancelled during pending state and validate the reason
6491 """
6492 subvolume = self._generate_random_subvolume_name()
6493 snapshot = self._generate_random_snapshot_name()
6494 clone1 = self._generate_random_clone_name()
6495
6496 # create subvolume
6497 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6498
6499 # do some IO
6500 self._do_subvolume_io(subvolume, number_of_files=200)
6501
6502 # snapshot subvolume
6503 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6504
6505 # Insert delay at the beginning of snapshot clone
6506 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6507
6508 # schedule clone1
6509 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6510
6511 # cancel pending clone1
6512 self._fs_cmd("clone", "cancel", self.volname, clone1)
6513
6514 # check clone1 status
6515 clone1_result = self._get_clone_status(clone1)
6516 self.assertEqual(clone1_result["status"]["state"], "canceled")
6517 self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
6518 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")
6519
6520 # clone removal should succeed with force after cancelled, remove clone1
6521 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6522
6523 # remove snapshot
6524 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6525
6526 # remove subvolumes
6527 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6528
6529 # verify trash dir is clean
6530 self._wait_for_trash_empty()
6531
6532 def test_clone_failure_status_in_progress_cancelled(self):
6533 """
6534 ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
6535 """
6536 subvolume = self._generate_random_subvolume_name()
6537 snapshot = self._generate_random_snapshot_name()
6538 clone1 = self._generate_random_clone_name()
6539
6540 # create subvolume
6541 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6542
6543 # do some IO
6544 self._do_subvolume_io(subvolume, number_of_files=200)
6545
6546 # snapshot subvolume
6547 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6548
6549 # Insert delay at the beginning of snapshot clone
6550 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6551
6552 # schedule clone1
6553 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6554
6555 # wait for clone1 to be in-progress
6556 self._wait_for_clone_to_be_in_progress(clone1)
6557
6558 # cancel in-progress clone1
6559 self._fs_cmd("clone", "cancel", self.volname, clone1)
6560
6561 # check clone1 status
6562 clone1_result = self._get_clone_status(clone1)
6563 self.assertEqual(clone1_result["status"]["state"], "canceled")
6564 self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
6565 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")
6566
6567 # clone removal should succeed with force after cancelled, remove clone1
6568 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6569
6570 # remove snapshot
6571 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6572
6573 # remove subvolumes
6574 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6575
6576 # verify trash dir is clean
6577 self._wait_for_trash_empty()
6578
6579 def test_subvolume_snapshot_clone(self):
6580 subvolume = self._generate_random_subvolume_name()
6581 snapshot = self._generate_random_snapshot_name()
6582 clone = self._generate_random_clone_name()
6583
6584 # create subvolume
6585 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6586
6587 # do some IO
6588 self._do_subvolume_io(subvolume, number_of_files=64)
6589
6590 # snapshot subvolume
6591 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6592
6593 # schedule a clone
6594 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6595
6596 # check clone status
6597 self._wait_for_clone_to_complete(clone)
6598
6599 # verify clone
6600 self._verify_clone(subvolume, snapshot, clone)
6601
6602 # remove snapshot
6603 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6604
6605 # remove subvolumes
6606 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6607 self._fs_cmd("subvolume", "rm", self.volname, clone)
6608
6609 # verify trash dir is clean
6610 self._wait_for_trash_empty()
6611
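# For reference, the flow above maps to the following CLI commands
# (a sketch; <vol>, <subvol>, <snap> and <clone> are placeholders):
#   ceph fs subvolume create <vol> <subvol> --mode=777
#   ceph fs subvolume snapshot create <vol> <subvol> <snap>
#   ceph fs subvolume snapshot clone <vol> <subvol> <snap> <clone>
#   ceph fs clone status <vol> <clone>
#   ceph fs subvolume snapshot rm <vol> <subvol> <snap>
#   ceph fs subvolume rm <vol> <subvol>
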
6612 def test_subvolume_snapshot_clone_quota_exceeded(self):
6613 subvolume = self._generate_random_subvolume_name()
6614 snapshot = self._generate_random_snapshot_name()
6615 clone = self._generate_random_clone_name()
6616
6617 # create subvolume with 20MB quota
6618 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
6619 self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize))
6620
6621 # do IO: write 50 files of 1 MB each to exceed the quota. This mostly succeeds, as quota enforcement takes time.
6622 self._do_subvolume_io(subvolume, number_of_files=50)
6623
6624 # snapshot subvolume
6625 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6626
6627 # schedule a clone
6628 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6629
6630 # check clone status
6631 self._wait_for_clone_to_complete(clone)
6632
6633 # verify clone
6634 self._verify_clone(subvolume, snapshot, clone)
6635
6636 # remove snapshot
6637 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6638
6639 # remove subvolumes
6640 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6641 self._fs_cmd("subvolume", "rm", self.volname, clone)
6642
6643 # verify trash dir is clean
6644 self._wait_for_trash_empty()
6645
6646 def test_subvolume_snapshot_in_complete_clone_rm(self):
6647 """
6648 Validates the removal of a clone when it is not in the 'complete|cancelled|failed' state.
6649 Forceful removal of a subvolume clone succeeds only if the clone is in one of the
6650 'complete|cancelled|failed' states; it fails with EAGAIN in any other state.
6651 """
6652
6653 subvolume = self._generate_random_subvolume_name()
6654 snapshot = self._generate_random_snapshot_name()
6655 clone = self._generate_random_clone_name()
6656
6657 # create subvolume
6658 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6659
6660 # do some IO
6661 self._do_subvolume_io(subvolume, number_of_files=64)
6662
6663 # snapshot subvolume
6664 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6665
6666 # Insert delay at the beginning of snapshot clone
6667 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6668
6669 # schedule a clone
6670 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6671
6672 # Use --force since the clone is not complete. Returns EAGAIN, as the clone is neither complete nor cancelled.
6673 try:
6674 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6675 except CommandFailedError as ce:
6676 if ce.exitstatus != errno.EAGAIN:
6677 raise RuntimeError("invalid error code when trying to remove failed clone")
6678 else:
6679 raise RuntimeError("expected error when removing a failed clone")
6680
6681 # cancel on-going clone
6682 self._fs_cmd("clone", "cancel", self.volname, clone)
6683
6684 # verify canceled state
6685 self._check_clone_canceled(clone)
6686
6687 # clone removal should succeed after cancel
6688 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6689
6690 # remove snapshot
6691 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6692
6693 # remove subvolumes
6694 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6695
6696 # verify trash dir is clean
6697 self._wait_for_trash_empty()
6698
6699 def test_subvolume_snapshot_clone_retain_suid_guid(self):
6700 subvolume = self._generate_random_subvolume_name()
6701 snapshot = self._generate_random_snapshot_name()
6702 clone = self._generate_random_clone_name()
6703
6704 # create subvolume
6705 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6706
6707 # Create a file with suid, sgid bits set along with the executable bit.
6708 args = ["subvolume", "getpath", self.volname, subvolume]
6709 args = tuple(args)
6710 subvolpath = self._fs_cmd(*args)
6711 self.assertNotEqual(subvolpath, None)
6712 subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline
6713
6714 file_path = os.path.join(subvolpath, "test_suid_file")
6716 self.mount_a.run_shell(["touch", file_path])
6717 self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])
6718
6719 # snapshot subvolume
6720 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6721
6722 # schedule a clone
6723 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6724
6725 # check clone status
6726 self._wait_for_clone_to_complete(clone)
6727
6728 # verify clone
6729 self._verify_clone(subvolume, snapshot, clone)
6730
6731 # remove snapshot
6732 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6733
6734 # remove subvolumes
6735 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6736 self._fs_cmd("subvolume", "rm", self.volname, clone)
6737
6738 # verify trash dir is clean
6739 self._wait_for_trash_empty()
6740
6741 def test_subvolume_snapshot_clone_and_reclone(self):
6742 subvolume = self._generate_random_subvolume_name()
6743 snapshot = self._generate_random_snapshot_name()
6744 clone1, clone2 = self._generate_random_clone_name(2)
6745
6746 # create subvolume
6747 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6748
6749 # do some IO
6750 self._do_subvolume_io(subvolume, number_of_files=32)
6751
6752 # snapshot subvolume
6753 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6754
6755 # schedule a clone
6756 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6757
6758 # check clone status
6759 self._wait_for_clone_to_complete(clone1)
6760
6761 # verify clone
6762 self._verify_clone(subvolume, snapshot, clone1)
6763
6764 # remove snapshot
6765 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6766
6767 # now the clone is just like a normal subvolume -- snapshot the clone and fork
6768 # another clone. before that, do some IO so the two can be differentiated.
6769 self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)
6770
6771 # snapshot clone -- use same snap name
6772 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)
6773
6774 # schedule a clone
6775 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)
6776
6777 # check clone status
6778 self._wait_for_clone_to_complete(clone2)
6779
6780 # verify clone
6781 self._verify_clone(clone1, snapshot, clone2)
6782
6783 # remove snapshot
6784 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)
6785
6786 # remove subvolumes
6787 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6788 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6789 self._fs_cmd("subvolume", "rm", self.volname, clone2)
6790
6791 # verify trash dir is clean
6792 self._wait_for_trash_empty()
6793
6794 def test_subvolume_snapshot_clone_cancel_in_progress(self):
6795 subvolume = self._generate_random_subvolume_name()
6796 snapshot = self._generate_random_snapshot_name()
6797 clone = self._generate_random_clone_name()
6798
6799 # create subvolume
6800 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6801
6802 # do some IO
6803 self._do_subvolume_io(subvolume, number_of_files=128)
6804
6805 # snapshot subvolume
6806 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6807
6808 # Insert delay at the beginning of snapshot clone
6809 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6810
6811 # schedule a clone
6812 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6813
6814 # cancel on-going clone
6815 self._fs_cmd("clone", "cancel", self.volname, clone)
6816
6817 # verify canceled state
6818 self._check_clone_canceled(clone)
6819
6820 # remove snapshot
6821 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6822
6823 # remove subvolumes
6824 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6825 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6826
6827 # verify trash dir is clean
6828 self._wait_for_trash_empty()
6829
6830 def test_subvolume_snapshot_clone_cancel_pending(self):
6831 """
6832 this test is a bit more involved compared to canceling an in-progress clone.
6833 we need to ensure that a to-be-canceled clone has not yet been picked up
6834 by the cloner threads. exploit the fact that clones are picked up in FCFS
6835 order and that there are four (4) cloner threads by default. When the number of
6836 cloner threads increases, this test _may_ start tripping -- so the number of
6837 clone operations would need to be jacked up.
6838 """
6839 # default number of clone threads
6840 NR_THREADS = 4
6841 # good enough for 4 threads
6842 NR_CLONES = 5
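# (with 4 threads and 5 clones, at least NR_CLONES - NR_THREADS = 1 clone
#  is guaranteed to still be queued, i.e. cancellable while 'pending')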
6843 # yeah, 1 GiB -- we need the clone to run for some time
6844 FILE_SIZE_MB = 1024
6845
6846 subvolume = self._generate_random_subvolume_name()
6847 snapshot = self._generate_random_snapshot_name()
6848 clones = self._generate_random_clone_name(NR_CLONES)
6849
6850 # create subvolume
6851 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6852
6853 # do some IO
6854 self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)
6855
6856 # snapshot subvolume
6857 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6858
6859 # schedule clones
6860 for clone in clones:
6861 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6862
6863 to_wait = clones[0:NR_THREADS]
6864 to_cancel = clones[NR_THREADS:]
6865
6866 # cancel pending clones and verify
6867 for clone in to_cancel:
6868 status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
6869 self.assertEqual(status["status"]["state"], "pending")
6870 self._fs_cmd("clone", "cancel", self.volname, clone)
6871 self._check_clone_canceled(clone)
6872
6873 # let's cancel on-going clones. handle the case where some of the clones
6874 # _just_ complete
6875 for clone in list(to_wait):
6876 try:
6877 self._fs_cmd("clone", "cancel", self.volname, clone)
6878 to_cancel.append(clone)
6879 to_wait.remove(clone)
6880 except CommandFailedError as ce:
6881 if ce.exitstatus != errno.EINVAL:
6882 raise RuntimeError("invalid error code when cancelling on-going clone")
6883
6884 # remove snapshot
6885 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6886
6887 # remove subvolumes
6888 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6889 for clone in to_wait:
6890 self._fs_cmd("subvolume", "rm", self.volname, clone)
6891 for clone in to_cancel:
6892 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6893
6894 # verify trash dir is clean
6895 self._wait_for_trash_empty()
6896
6897 def test_subvolume_snapshot_clone_different_groups(self):
6898 subvolume = self._generate_random_subvolume_name()
6899 snapshot = self._generate_random_snapshot_name()
6900 clone = self._generate_random_clone_name()
6901 s_group, c_group = self._generate_random_group_name(2)
6902
6903 # create groups
6904 self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
6905 self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
6906
6907 # create subvolume
6908 self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")
6909
6910 # do some IO
6911 self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
6912
6913 # snapshot subvolume
6914 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
6915
6916 # schedule a clone
6917 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
6918 '--group_name', s_group, '--target_group_name', c_group)
6919
6920 # check clone status
6921 self._wait_for_clone_to_complete(clone, clone_group=c_group)
6922
6923 # verify clone
6924 self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
6925
6926 # remove snapshot
6927 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
6928
6929 # remove subvolumes
6930 self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
6931 self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
6932
6933 # remove groups
6934 self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
6935 self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
6936
6937 # verify trash dir is clean
6938 self._wait_for_trash_empty()
6939
6940 def test_subvolume_snapshot_clone_fail_with_remove(self):
6941 subvolume = self._generate_random_subvolume_name()
6942 snapshot = self._generate_random_snapshot_name()
6943 clone1, clone2 = self._generate_random_clone_name(2)
6944
6945 pool_capacity = 32 * 1024 * 1024
6946 # number of files required to fill up 99% of the pool
6947 nr_files = int((pool_capacity * 0.99) / (self.DEFAULT_FILE_SIZE * 1024 * 1024))
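# (~31 files of 1 MiB each; cloning this data later overshoots the
#  pool_capacity // 4 byte quota set on new_pool below)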
6948
6949 # create subvolume
6950 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6951
6952 # do some IO
6953 self._do_subvolume_io(subvolume, number_of_files=nr_files)
6954
6955 # snapshot subvolume
6956 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6957
6958 # add data pool
6959 new_pool = "new_pool"
6960 self.fs.add_data_pool(new_pool)
6961
6962 self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
6963 "max_bytes", "{0}".format(pool_capacity // 4))
6964
6965 # schedule a clone
6966 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
6967
6968 # check clone status -- this should dramatically overshoot the pool quota
6969 self._wait_for_clone_to_complete(clone1)
6970
6971 # verify clone
6972 self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)
6973
6974 # wait a bit so that subsequent I/O gives a pool-full error
6975 time.sleep(120)
6976
6977 # schedule a clone
6978 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)
6979
6980 # check clone status
6981 self._wait_for_clone_to_fail(clone2)
6982
6983 # remove snapshot
6984 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6985
6986 # remove subvolumes
6987 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6988 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6989 try:
6990 self._fs_cmd("subvolume", "rm", self.volname, clone2)
6991 except CommandFailedError as ce:
6992 if ce.exitstatus != errno.EAGAIN:
6993 raise RuntimeError("invalid error code when trying to remove failed clone")
6994 else:
6995 raise RuntimeError("expected error when removing a failed clone")
6996
6997 # ... and with force, failed clone can be removed
6998 self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")
6999
7000 # verify trash dir is clean
7001 self._wait_for_trash_empty()
7002
7003 def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
7004 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7005 snapshot = self._generate_random_snapshot_name()
7006 clone = self._generate_random_clone_name()
7007
7008 # create subvolumes
7009 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
7010 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")
7011
7012 # do some IO
7013 self._do_subvolume_io(subvolume1, number_of_files=32)
7014
7015 # snapshot subvolume
7016 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)
7017
7018 # schedule a clone with target as subvolume2
7019 try:
7020 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
7021 except CommandFailedError as ce:
7022 if ce.exitstatus != errno.EEXIST:
7023 raise RuntimeError("invalid error code when cloning to existing subvolume")
7024 else:
7025 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
7026
7027 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
7028
7029 # schedule a clone with target as clone
7030 try:
7031 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
7032 except CommandFailedError as ce:
7033 if ce.exitstatus != errno.EEXIST:
7034 raise RuntimeError("invalid error code when cloning to existing clone")
7035 else:
7036 raise RuntimeError("expected cloning to fail if the target is an existing clone")
7037
7038 # check clone status
7039 self._wait_for_clone_to_complete(clone)
7040
7041 # verify clone
7042 self._verify_clone(subvolume1, snapshot, clone)
7043
7044 # remove snapshot
7045 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)
7046
7047 # remove subvolumes
7048 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7049 self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
7050 self._fs_cmd("subvolume", "rm", self.volname, clone)
7051
7052 # verify trash dir is clean
7053 self._wait_for_trash_empty()
7054
7055 def test_subvolume_snapshot_clone_pool_layout(self):
7056 subvolume = self._generate_random_subvolume_name()
7057 snapshot = self._generate_random_snapshot_name()
7058 clone = self._generate_random_clone_name()
7059
7060 # add data pool
7061 new_pool = "new_pool"
7062 newid = self.fs.add_data_pool(new_pool)
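# add_data_pool returns the numeric pool id; it is kept because an old kernel
# client reports the layout pool by id rather than by name (checked further below)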
7063
7064 # create subvolume
7065 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
7066
7067 # do some IO
7068 self._do_subvolume_io(subvolume, number_of_files=32)
7069
7070 # snapshot subvolume
7071 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7072
7073 # schedule a clone
7074 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)
7075
7076 # check clone status
7077 self._wait_for_clone_to_complete(clone)
7078
7079 # verify clone
7080 self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)
7081
7082 # remove snapshot
7083 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7084
7085 subvol_path = self._get_subvolume_path(self.volname, clone)
7086 desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
7087 try:
7088 self.assertEqual(desired_pool, new_pool)
7089 except AssertionError:
7090 self.assertEqual(int(desired_pool), newid) # old kernel returns id
7091
7092 # remove subvolumes
7093 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7094 self._fs_cmd("subvolume", "rm", self.volname, clone)
7095
7096 # verify trash dir is clean
7097 self._wait_for_trash_empty()
7098
7099 def test_subvolume_snapshot_clone_under_group(self):
7100 subvolume = self._generate_random_subvolume_name()
7101 snapshot = self._generate_random_snapshot_name()
7102 clone = self._generate_random_clone_name()
7103 group = self._generate_random_group_name()
7104
7105 # create subvolume
7106 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
7107
7108 # do some IO
7109 self._do_subvolume_io(subvolume, number_of_files=32)
7110
7111 # snapshot subvolume
7112 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7113
7114 # create group
7115 self._fs_cmd("subvolumegroup", "create", self.volname, group)
7116
7117 # schedule a clone
7118 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)
7119
7120 # check clone status
7121 self._wait_for_clone_to_complete(clone, clone_group=group)
7122
7123 # verify clone
7124 self._verify_clone(subvolume, snapshot, clone, clone_group=group)
7125
7126 # remove snapshot
7127 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7128
7129 # remove subvolumes
7130 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7131 self._fs_cmd("subvolume", "rm", self.volname, clone, group)
7132
7133 # remove group
7134 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7135
7136 # verify trash dir is clean
7137 self._wait_for_trash_empty()
7138
7139 def test_subvolume_snapshot_clone_with_attrs(self):
7140 subvolume = self._generate_random_subvolume_name()
7141 snapshot = self._generate_random_snapshot_name()
7142 clone = self._generate_random_clone_name()
7143
7144 mode = "777"
7145 uid = "1000"
7146 gid = "1000"
7147 new_uid = "1001"
7148 new_gid = "1001"
7149 new_mode = "700"
7150
7151 # create subvolume
7152 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
7153
7154 # do some IO
7155 self._do_subvolume_io(subvolume, number_of_files=32)
7156
7157 # snapshot subvolume
7158 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7159
7160 # change subvolume attrs (to ensure clone picks up snapshot attrs)
7161 self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)
7162
7163 # schedule a clone
7164 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
7165
7166 # check clone status
7167 self._wait_for_clone_to_complete(clone)
7168
7169 # verify clone
7170 self._verify_clone(subvolume, snapshot, clone)
7171
7172 # remove snapshot
7173 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7174
7175 # remove subvolumes
7176 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7177 self._fs_cmd("subvolume", "rm", self.volname, clone)
7178
7179 # verify trash dir is clean
7180 self._wait_for_trash_empty()
7181
7182 def test_subvolume_snapshot_clone_with_upgrade(self):
7183 """
7184 yet another poor man's upgrade test -- rather than going through a full
7185 upgrade cycle, emulate old-style subvolumes by going through the wormhole
7186 and verify the clone operation.
7187 further ensure that a legacy subvolume is not updated to v2, but the clone is.
7188 """
7189 subvolume = self._generate_random_subvolume_name()
7190 snapshot = self._generate_random_snapshot_name()
7191 clone = self._generate_random_clone_name()
7192
7193 # emulate an old-fashioned subvolume
7194 createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
7195 self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)
7196
7197 # add required xattrs to subvolume
7198 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7199 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7200
7201 # do some IO
7202 self._do_subvolume_io(subvolume, number_of_files=64)
7203
7204 # snapshot subvolume
7205 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7206
7207 # ensure metadata file is in legacy location, with required version v1
7208 self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)
7209
7210 # Insert delay at the beginning of snapshot clone
7211 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
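# the delay keeps the clone from finishing immediately, so the snapshot
# removal attempted next reliably races a pending clone and returns EAGAIN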
7212
7213 # schedule a clone
7214 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
7215
7216 # snapshot should not be deletable now
7217 try:
7218 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7219 except CommandFailedError as ce:
7220 self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
7221 else:
7222 self.fail("expected removing source snapshot of a clone to fail")
7223
7224 # check clone status
7225 self._wait_for_clone_to_complete(clone)
7226
7227 # verify clone
7228 self._verify_clone(subvolume, snapshot, clone, source_version=1)
7229
7230 # remove snapshot
7231 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7232
7233 # ensure metadata file is in v2 location, with required version v2
7234 self._assert_meta_location_and_version(self.volname, clone)
7235
7236 # remove subvolumes
7237 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7238 self._fs_cmd("subvolume", "rm", self.volname, clone)
7239
7240 # verify trash dir is clean
7241 self._wait_for_trash_empty()
7242
7243 def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
7244 """
7245 Validate 'max_concurrent_clones' config option
7246 """
7247
7248 # get the default number of cloner threads
7249 default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7250 self.assertEqual(default_max_concurrent_clones, 4)
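# a rough CLI equivalent of these helpers (an assumption, for reference):
# ceph config get mgr mgr/volumes/max_concurrent_clones
# ceph config set mgr mgr/volumes/max_concurrent_clones 6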
7251
7252 # Increase number of cloner threads
7253 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
7254 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7255 self.assertEqual(max_concurrent_clones, 6)
7256
7257 # Decrease number of cloner threads
7258 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7259 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7260 self.assertEqual(max_concurrent_clones, 2)
7261
7262 def test_subvolume_snapshot_config_snapshot_clone_delay(self):
7263 """
7264 Validate 'snapshot_clone_delay' config option
7265 """
7266
7267 # get the default delay before starting the clone
7268 default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7269 self.assertEqual(default_timeout, 0)
7270
7271 # Insert delay of 2 seconds at the beginning of the snapshot clone
7272 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7273 default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7274 self.assertEqual(default_timeout, 2)
7275
7276 # Decrease number of cloner threads
7277 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7278 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7279 self.assertEqual(max_concurrent_clones, 2)
7280
7281 def test_subvolume_under_group_snapshot_clone(self):
7282 subvolume = self._generate_random_subvolume_name()
7283 group = self._generate_random_group_name()
7284 snapshot = self._generate_random_snapshot_name()
7285 clone = self._generate_random_clone_name()
7286
7287 # create group
7288 self._fs_cmd("subvolumegroup", "create", self.volname, group)
7289
7290 # create subvolume
7291 self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")
7292
7293 # do some IO
7294 self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)
7295
7296 # snapshot subvolume
7297 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
7298
7299 # schedule a clone
7300 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)
7301
7302 # check clone status
7303 self._wait_for_clone_to_complete(clone)
7304
7305 # verify clone
7306 self._verify_clone(subvolume, snapshot, clone, source_group=group)
7307
7308 # remove snapshot
7309 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
7310
7311 # remove subvolumes
7312 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
7313 self._fs_cmd("subvolume", "rm", self.volname, clone)
7314
7315 # remove group
7316 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7317
7318 # verify trash dir is clean
7319 self._wait_for_trash_empty()
7320
7321
7322 class TestMisc(TestVolumesHelper):
7323 """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
7324 def test_connection_expiration(self):
7325 # unmount any cephfs mounts
7326 for i in range(0, self.CLIENTS_REQUIRED):
7327 self.mounts[i].umount_wait()
7328 sessions = self._session_list()
7329 self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
7330
7331 # Get the mgr to definitely mount cephfs
7332 subvolume = self._generate_random_subvolume_name()
7333 self._fs_cmd("subvolume", "create", self.volname, subvolume)
7334 sessions = self._session_list()
7335 self.assertEqual(len(sessions), 1)
7336
7337 # Now wait for the mgr to expire the connection:
7338 self.wait_until_evicted(sessions[0]['id'], timeout=90)
7339
7340 def test_mgr_eviction(self):
7341 # unmount any cephfs mounts
7342 for i in range(0, self.CLIENTS_REQUIRED):
7343 self.mounts[i].umount_wait()
7344 sessions = self._session_list()
7345 self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
7346
7347 # Get the mgr to definitely mount cephfs
7348 subvolume = self._generate_random_subvolume_name()
7349 self._fs_cmd("subvolume", "create", self.volname, subvolume)
7350 sessions = self._session_list()
7351 self.assertEqual(len(sessions), 1)
7352
7353 # Now fail the mgr, check the session was evicted
7354 mgr = self.mgr_cluster.get_active_id()
7355 self.mgr_cluster.mgr_fail(mgr)
7356 self.wait_until_evicted(sessions[0]['id'])
7357
7358 def test_names_can_only_be_goodchars(self):
7359 """
7360 Test that creating volumes, subvolumes and subvolume groups fails when
7361 their names use characters beyond [a-zA-Z0-9 -_.].
7362 """
7363 volname, badname = 'testvol', 'abcd@#'
7364
7365 with self.assertRaises(CommandFailedError):
7366 self._fs_cmd('volume', 'create', badname)
7367 self._fs_cmd('volume', 'create', volname)
7368
7369 with self.assertRaises(CommandFailedError):
7370 self._fs_cmd('subvolumegroup', 'create', volname, badname)
7371
7372 with self.assertRaises(CommandFailedError):
7373 self._fs_cmd('subvolume', 'create', volname, badname)
7374 self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
7375
7376 def test_subvolume_ops_on_nonexistent_vol(self):
7377 # tests the fs subvolume operations on a nonexistent volume
7378
7379 volname = "non_existent_subvolume"
7380
7381 # try subvolume operations
7382 for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
7383 try:
7384 if op == "resize":
7385 self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
7386 elif op == "pin":
7387 self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
7388 elif op == "ls":
7389 self._fs_cmd("subvolume", "ls", volname)
7390 else:
7391 self._fs_cmd("subvolume", op, volname, "subvolume_1")
7392 except CommandFailedError as ce:
7393 self.assertEqual(ce.exitstatus, errno.ENOENT)
7394 else:
7395 self.fail("expected the 'fs subvolume {0}' command to fail".format(op))
7396
7397 # try subvolume snapshot operations and clone create
7398 for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
7399 try:
7400 if op == "ls":
7401 self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
7402 elif op == "clone":
7403 self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
7404 else:
7405 self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
7406 except CommandFailedError as ce:
7407 self.assertEqual(ce.exitstatus, errno.ENOENT)
7408 else:
7409 self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))
7410
7411 # try clone status
7412 try:
7413 self._fs_cmd("clone", "status", volname, "clone_1")
7414 except CommandFailedError as ce:
7415 self.assertEqual(ce.exitstatus, errno.ENOENT)
7416 else:
7417 self.fail("expected the 'fs clone status' command to fail")
7418
7419 # try subvolumegroup operations
7420 for op in ("create", "rm", "getpath", "pin", "ls"):
7421 try:
7422 if op == "pin":
7423 self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
7424 elif op == "ls":
7425 self._fs_cmd("subvolumegroup", op, volname)
7426 else:
7427 self._fs_cmd("subvolumegroup", op, volname, "group_1")
7428 except CommandFailedError as ce:
7429 self.assertEqual(ce.exitstatus, errno.ENOENT)
7430 else:
7431 self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))
7432
7433 # try subvolumegroup snapshot operations
7434 for op in ("create", "rm", "ls"):
7435 try:
7436 if op == "ls":
7437 self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
7438 else:
7439 self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
7440 except CommandFailedError as ce:
7441 self.assertEqual(ce.exitstatus, errno.ENOENT)
7442 else:
7443 self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))
7444
7445 def test_subvolume_upgrade_legacy_to_v1(self):
7446 """
7447 poor man's upgrade test -- rather than going through a full upgrade cycle,
7448 emulate subvolumes by going through the wormhole and verify that they are
7449 accessible.
7450 further ensure that a legacy subvolume is not updated to v2.
7451 """
7452 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7453 group = self._generate_random_group_name()
7454
7455 # emulate an old-fashioned subvolume -- one in the default group and
7456 # the other in a custom group
7457 createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
7458 self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)
7459
7460 # emulate the second subvolume in a custom group (mkdir -p creates the group dir too)
7461 createpath2 = os.path.join(".", "volumes", group, subvolume2)
7462 self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)
7463
7464 # this would auto-upgrade on access without anyone noticing
7465 subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
7466 self.assertNotEqual(subvolpath1, None)
7467 subvolpath1 = subvolpath1.rstrip() # strip any trailing newline
7468
7469 subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
7470 self.assertNotEqual(subvolpath2, None)
7471 subvolpath2 = subvolpath2.rstrip() # strip any trailing newline
7472
7473 # and... the subvolume path returned should be what we created behind the scenes
7474 self.assertEqual(createpath1[1:], subvolpath1)
7475 self.assertEqual(createpath2[1:], subvolpath2)
7476
7477 # ensure metadata file is in legacy location, with required version v1
7478 self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
7479 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
7480
7481 # remove subvolume
7482 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7483 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
7484
7485 # verify trash dir is clean
7486 self._wait_for_trash_empty()
7487
7488 # remove group
7489 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7490
7491 def test_subvolume_no_upgrade_v1_sanity(self):
7492 """
7493 poor man's upgrade test -- theme continues...
7494
7495 This test ensures that v1 subvolumes are retained as-is, due to a snapshot being present, and runs through
7496 a series of operations on the v1 subvolume to ensure they work as expected.
7497 """
7498 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
7499 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
7500 "type", "uid", "features", "state"]
7501 snap_md = ["created_at", "data_pool", "has_pending_clones"]
7502
7503 subvolume = self._generate_random_subvolume_name()
7504 snapshot = self._generate_random_snapshot_name()
7505 clone1, clone2 = self._generate_random_clone_name(2)
7506 mode = "777"
7507 uid = "1000"
7508 gid = "1000"
7509
7510 # emulate a v1 subvolume -- in the default group
7511 subvolume_path = self._create_v1_subvolume(subvolume)
7512
7513 # getpath
7514 subvolpath = self._get_subvolume_path(self.volname, subvolume)
7515 self.assertEqual(subvolpath, subvolume_path)
7516
7517 # ls
7518 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
7519 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
7520 self.assertEqual(subvolumes[0]['name'], subvolume,
7521 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
7522
7523 # info
7524 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
7525 for md in subvol_md:
7526 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
7527
7528 self.assertEqual(subvol_info["state"], "complete",
7529 msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
7530 self.assertEqual(len(subvol_info["features"]), 2,
7531 msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
7532 for feature in ['snapshot-clone', 'snapshot-autoprotect']:
7533 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
7534
7535 # resize
7536 nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
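# DEFAULT_FILE_SIZE is 1 (MB), so this resizes the quota to 10 MiB expressed in bytes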
7537 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
7538 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
7539 for md in subvol_md:
7540 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
7541 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
7542
7543 # create (idempotent) -- change some attrs to verify they are preserved from the snapshot on clone
7544 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
7545
7546 # do some IO
7547 self._do_subvolume_io(subvolume, number_of_files=8)
7548
7549 # snap-create
7550 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7551
7552 # clone
7553 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
7554
7555 # check clone status
7556 self._wait_for_clone_to_complete(clone1)
7557
7558 # ensure clone is v2
7559 self._assert_meta_location_and_version(self.volname, clone1, version=2)
7560
7561 # verify clone
7562 self._verify_clone(subvolume, snapshot, clone1, source_version=1)
7563
7564 # clone (older snapshot)
7565 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
7566
7567 # check clone status
7568 self._wait_for_clone_to_complete(clone2)
7569
7570 # ensure clone is v2
7571 self._assert_meta_location_and_version(self.volname, clone2, version=2)
7572
7573 # verify clone
7574 # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
7575 #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
7576
7577 # snap-info
7578 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
7579 for md in snap_md:
7580 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
7581 self.assertEqual(snap_info["has_pending_clones"], "no")
7582
7583 # snap-ls
7584 subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
7585 self.assertEqual(len(subvol_snapshots), 2, "subvolume snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
7586 snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
7587 for name in [snapshot, 'fake']:
7588 self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
7589
7590 # snap-rm
7591 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7592 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
7593
7594 # ensure the subvolume is still at version 1
7595 self._assert_meta_location_and_version(self.volname, subvolume, version=1)
7596
7597 # rm
7598 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7599 self._fs_cmd("subvolume", "rm", self.volname, clone1)
7600 self._fs_cmd("subvolume", "rm", self.volname, clone2)
7601
7602 # verify trash dir is clean
7603 self._wait_for_trash_empty()
7604
7605 def test_subvolume_no_upgrade_v1_to_v2(self):
7606 """
7607 poor man's upgrade test -- theme continues...
7608 ensure v1 to v2 upgrades are not done automatically due to various states of v1
7609 """
7610 subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
7611 group = self._generate_random_group_name()
7612
7613 # emulate a v1 subvolume -- in the default group
7614 subvol1_path = self._create_v1_subvolume(subvolume1)
7615
7616 # emulate a v1 subvolume -- in a custom group
7617 subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)
7618
7619 # emulate a v1 subvolume -- in a clone pending state
7620 self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')
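# clone states observed across these tests: pending -> in-progress -> complete,
# with 'failed' and 'canceled' as the terminal error states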
7621
7622 # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
7623 subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
7624 self.assertEqual(subvolpath1, subvol1_path)
7625
7626 subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
7627 self.assertEqual(subvolpath2, subvol2_path)
7628
7629 # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
7630 # use clone status, as only certain operations are allowed in pending state
7631 status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
7632 self.assertEqual(status["status"]["state"], "pending")
7633
7634 # remove snapshot
7635 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
7636 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)
7637
7638 # ensure metadata file is in v1 location, with version retained as v1
7639 self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
7640 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)
7641
7642 # remove subvolume
7643 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7644 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
7645 try:
7646 self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
7647 except CommandFailedError as ce:
7648 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
7649 else:
7650 self.fail("expected rm of subvolume undergoing clone to fail")
7651
7652 # ensure metadata file is in v1 location, with version retained as v1
7653 self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
7654 self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")
7655
7656 # verify list subvolumes returns an empty list
7657 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
7658 self.assertEqual(len(subvolumels), 0)
7659
7660 # verify trash dir is clean
7661 self._wait_for_trash_empty()
7662
7663 def test_subvolume_upgrade_v1_to_v2(self):
7664 """
7665 poor man's upgrade test -- theme continues...
7666 ensure v1 to v2 upgrades work
7667 """
7668 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7669 group = self._generate_random_group_name()
7670
7671 # emulate a v1 subvolume -- in the default group
7672 subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)
7673
7674 # emulate a v1 subvolume -- in a custom group
7675 subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)
7676
7677 # this would attempt auto-upgrade on access
7678 subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
7679 self.assertEqual(subvolpath1, subvol1_path)
7680
7681 subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
7682 self.assertEqual(subvolpath2, subvol2_path)
7683
7684 # ensure metadata file is in the v2 location, with the version upgraded to v2
7685 self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
7686 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)
7687
7688 # remove subvolume
7689 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7690 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
7691
7692 # verify trash dir is clean
7693 self._wait_for_trash_empty()
7694
7695 def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
7696 """
7697 Validate that a handcrafted .meta file on a legacy subvol root doesn't break the system
7698 on legacy subvol upgrade to v1
7699 poor man's upgrade test -- theme continues...
7700 """
7701 subvol1, subvol2 = self._generate_random_subvolume_name(2)
7702
7703 # emulate an old-fashioned subvolume in the default group
7704 createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
7705 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
7706
7707 # add required xattrs to subvolume
7708 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7709 self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)
7710
7711 # create v2 subvolume
7712 self._fs_cmd("subvolume", "create", self.volname, subvol2)
7713
7714 # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
7715 # .meta into legacy subvol1's root
7716 subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
7717 self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False)
7718
7719 # Upgrade legacy subvol1 to v1
7720 subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
7721 self.assertNotEqual(subvolpath1, None)
7722 subvolpath1 = subvolpath1.rstrip()
7723
7724 # the subvolume path returned should not be of subvol2 from handcrafted
7725 # .meta file
7726 self.assertEqual(createpath1[1:], subvolpath1)
7727
7728 # ensure metadata file is in legacy location, with required version v1
7729 self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)
7730
7731 # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
7732 # path whose '.meta' file is copied to subvol1 root
7733 authid1 = "alice"
7734 self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)
7735
7736 # Validate that the mds path added is of subvol1 and not of subvol2
7737 out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
7738 self.assertEqual("client.alice", out[0]["entity"])
7739 self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])
7740
7741 # remove subvolume
7742 self._fs_cmd("subvolume", "rm", self.volname, subvol1)
7743 self._fs_cmd("subvolume", "rm", self.volname, subvol2)
7744
7745 # verify trash dir is clean
7746 self._wait_for_trash_empty()
7747
7748 def test_binary_metafile_on_legacy_to_v1_upgrade(self):
7749 """
7750 Validate that a binary .meta file on a legacy subvol root doesn't break the system
7751 on legacy subvol upgrade to v1
7752 poor man's upgrade test -- theme continues...
7753 """
7754 subvol = self._generate_random_subvolume_name()
7755 group = self._generate_random_group_name()
7756
7757 # emulate an old-fashioned subvolume -- in a custom group
7758 createpath = os.path.join(".", "volumes", group, subvol)
7759 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
7760
7761 # add required xattrs to subvolume
7762 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7763 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7764
7765 # Create unparseable binary .meta file on legacy subvol's root
7766 meta_contents = os.urandom(4096)
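# random bytes stand in for an arbitrarily corrupt payload; the .meta file is
# normally plain text (cf. the unparseable-text variant of this test below)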
7767 meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
7768 self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)
7769
7770 # Upgrade legacy subvol to v1
7771 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
7772 self.assertNotEqual(subvolpath, None)
7773 subvolpath = subvolpath.rstrip()
7774
7775 # The legacy subvolume path should be returned for subvol.
7776 # Should ignore unparseable binary .meta file in subvol's root
7777 self.assertEqual(createpath[1:], subvolpath)
7778
7779 # ensure metadata file is in legacy location, with required version v1
7780 self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
7781
7782 # remove subvolume
7783 self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
7784
7785 # verify trash dir is clean
7786 self._wait_for_trash_empty()
7787
7788 # remove group
7789 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7790
7791 def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
7792 """
7793 Validate that an unparseable text .meta file on a legacy subvol root doesn't break the system
7794 on legacy subvol upgrade to v1
7795 poor man's upgrade test -- theme continues...
7796 """
7797 subvol = self._generate_random_subvolume_name()
7798 group = self._generate_random_group_name()
7799
7800 # emulate an old-fashioned subvolume -- in a custom group
7801 createpath = os.path.join(".", "volumes", group, subvol)
7802 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
7803
7804 # add required xattrs to subvolume
7805 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7806 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7807
7808 # Create unparseable text .meta file on legacy subvol's root
7809 meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
7810 meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
7811 self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)
7812
7813 # Upgrade legacy subvol to v1
7814 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
7815 self.assertNotEqual(subvolpath, None)
7816 subvolpath = subvolpath.rstrip()
7817
7818 # The legacy subvolume path should be returned for subvol.
7819 # Should ignore unparseable text .meta file in subvol's root
7820 self.assertEqual(createpath[1:], subvolpath)
7821
7822 # ensure metadata file is in legacy location, with required version v1
7823 self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
7824
7825 # remove subvolume
7826 self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
7827
7828 # verify trash dir is clean
7829 self._wait_for_trash_empty()
7830
7831 # remove group
7832 self._fs_cmd("subvolumegroup", "rm", self.volname, group)