import os
import json
import time
import errno
import random
import logging
import collections
import uuid
import unittest
from hashlib import md5
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolumes with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
    def _raw_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
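    # Usage sketch (illustrative, not called by any test): both wrappers shell
    # out via the mon manager and return the command's stdout as a string,
    # which callers typically feed to json.loads(), e.g.:
    #   volumes = json.loads(self._fs_cmd("volume", "ls"))           # 'ceph fs volume ls'
    #   pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json"))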
    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("failed", clone, clone_group, timo)
    def _check_clone_canceled(self, clone, clone_group=None):
        self.__check_clone_state("canceled", clone, clone_group, timo=1)
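    # State-polling sketch (illustrative): 'ceph fs clone status <vol> <clone>'
    # returns JSON shaped roughly like {"status": {"state": "in-progress", ...}};
    # the helpers above poll once per second until the state reaches
    # "complete", "failed" or "canceled", or the timeout expires.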
    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        if source_version == 2:
            # v2
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)

    def _verify_clone_attrs(self, source_path, clone_path):
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in source and clone (sink) verify certain inode attributes:
        # inode type, mode, ownership, [am]time.
        for source_path in paths:
            sink_entry = source_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type
            sval = int(self.mount_a.run_shell(['stat', '-c%f', source_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c%u', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c%g', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps: compare only mtime; do not check atime, since
            # the kernel client generally does not update it the way ceph-fuse does.
            sval = int(self.mount_a.run_shell(['stat', '-c%Y', source_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)
    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verifies the following clone root attributes: quota, data_pool and
        # pool_namespace; remaining attributes of the clone root are validated
        # in _verify_clone_attrs.

        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))
    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        # pass in subvol_path (the subvolume path when the snapshot was taken)
        # when the subvolume has been removed but its snapshots are retained
        # for clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)
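    # Illustrative flow (a sketch, not called by any test): the typical
    # create -> snapshot -> clone -> verify sequence the helpers above
    # support; command spellings follow the 'ceph fs' CLI used throughout
    # this file.
    def _example_clone_flow(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        self._wait_for_clone_to_complete(clone)
        self._verify_clone(subvolume, snapshot, clone)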
    def _generate_random_volume_name(self, count=1):
        n = self.volume_start
        volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.volume_start += count
        return volumes[0] if count == 1 else volumes

    def _generate_random_subvolume_name(self, count=1):
        n = self.subvolume_start
        subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.subvolume_start += count
        return subvolumes[0] if count == 1 else subvolumes

    def _generate_random_group_name(self, count=1):
        n = self.group_start
        groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.group_start += count
        return groups[0] if count == 1 else groups

    def _generate_random_snapshot_name(self, count=1):
        n = self.snapshot_start
        snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.snapshot_start += count
        return snaps[0] if count == 1 else snaps

    def _generate_random_clone_name(self, count=1):
        n = self.clone_start
        clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.clone_start += count
        return clones[0] if count == 1 else clones
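    # Naming sketch (illustrative): with volume_start == 42, a call such as
    # self._generate_random_volume_name(2) yields
    # ["volume_0000000000000042", "volume_0000000000000043"] and advances
    # volume_start to 44; count == 1 returns a bare string instead of a list.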
    def _enable_multi_fs(self):
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_subvolume_group_path(self, vol_name, group_name):
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/' and trailing whitespace
        return path[1:].rstrip()
    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/' and trailing whitespace
        return path[1:].rstrip()
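    # Path-shape sketch (illustrative, inferred from the helpers in this
    # file): for a v2 subvolume 'sv' in the default group, 'subvolume getpath'
    # prints something like
    #   /volumes/_nogroup/sv/<uuid>
    # and the helper strips the leading '/' so the result can be used relative
    # to the client mountpoint.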
    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md

    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)

        # ownership
        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)
    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files, each of size {2}MB, under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)
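    # Usage sketch (illustrative): fill a subvolume with ten 1MB files under a
    # fresh subdirectory, e.g.
    #   self._do_subvolume_io(subvolume, create_dir="dir.0", number_of_files=10)
    # Files are named "<TEST_FILE_NAME_PREFIX>.<i>", i.e. subvolume_file.0,
    # subvolume_file.1, and so on.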
    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)
    def _wait_for_trash_empty(self, timeout=30):
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
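    # Trash sketch (illustrative): 'fs subvolume rm' moves the subvolume
    # directory under <fs root>/volumes/_deleting and purges it
    # asynchronously, which is why the tests below call
    # _wait_for_trash_empty() after removals before asserting anything else.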
    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "expected version '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))
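    # Meta-version sketch (illustrative): the on-disk .meta is an ini-style
    # file whose [GLOBAL] section carries a line of the exact form
    # 'version = 2'; the loop above looks for that literal line rather than
    # parsing the file.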
    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath
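    # Meta-layout sketch (illustrative): for subvol_type='subvolume' and
    # state='complete', the .meta written above reads:
    #   [GLOBAL]
    #   version = 1
    #   type = subvolume
    #   path = /volumes/_nogroup/<name>/<uuid>
    #   state = complete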
    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
        else:
            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)
    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
            key = {key}

        """.format(authid=authid, key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())
    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None on decode failure.
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data
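    # Example (illustrative): _auth_metadata_get('{"version": 5}') returns the
    # dict {'version': 5}, while malformed input such as 'not-json' yields
    # None instead of raising.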
    def setUp(self):
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()

class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That a volume can be created and then cleaned up.
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if volname not in [volume['name'] for volume in volumels]:
            raise RuntimeError("Error creating volume '{0}'".format(volname))
        else:
            # clean up
            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleaned up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        # create new volumes and add them to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")
    def test_volume_rm(self):
        """
        That a volume can only be removed when --yes-i-really-mean-it is used,
        and that the deleted volume is no longer listed.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                # check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if self.volname in [volume['name'] for volume in volumes]:
                    raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                       "Volume {0} was not removed.".format(self.volname))
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")
    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That an arbitrary data pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if the fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        # check if the osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)
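    # JSON-shape sketch (illustrative): 'ceph fs status --format=json-pretty'
    # returns a dict whose "pools" entry lists the volume's data and metadata
    # pools; the removal tests above rely only on each entry's "name" key.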
    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true, and that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if the fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        # check if the pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))

class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    def test_nonexistent_subvolume_group_create(self):
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try creating a subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")
    def test_nonexistent_subvolume_group_rm(self):
        group = "non_existent_group"

        # try removing a nonexistent subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether group path is cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns the pool id

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
    def test_subvolume_group_create_with_desired_mode(self):
        group1, group2 = self._generate_random_group_name(2)
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)

        group1_path = self._get_subvolume_group_path(self.volname, group1)
        group2_path = self._get_subvolume_group_path(self.volname, group2)
        volumes_path = os.path.dirname(group1_path)

        # check group modes
        actual_mode1 = self.mount_a.run_shell(['stat', '-c%a', group1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c%a', group2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode1)

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid,
        and that its uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
    def test_subvolume_group_ls(self):
        # tests the 'fs subvolumegroup ls' command

        # create subvolume groups
        subvolumegroups = self._generate_random_group_name(3)
        for groupname in subvolumegroups:
            self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
        else:
            subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
            if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
                raise RuntimeError("Error creating or listing subvolume groups")
    def test_subvolume_group_ls_for_nonexistent_volume(self):
        # tests the 'fs subvolumegroup ls' command when the '/volumes' directory doesn't exist
        # prerequisite: we expect the test volume to exist with NO subvolume group created

        # list subvolume groups
        subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
        if len(subvolumegroupls) > 0:
            raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
    def test_subvolumegroup_pin_distributed(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_distributed', True)

        group = "pinme"
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
        subvolumes = self._generate_random_subvolume_name(50)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_group_rm_force(self):
        # test removing a non-existent subvolume group with --force
        group = self._generate_random_group_name()
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")

class TestSubvolumes(TestVolumesHelper):
    """Tests for FS subvolume operations, except snapshot and snapshot clone."""
    def test_async_subvolume_rm(self):
        subvolumes = self._generate_random_subvolume_name(100)

        # create subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
            self._do_subvolume_io(subvolume, number_of_files=10)

        self.mount_a.umount_wait()

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        self.mount_a.mount_wait()

        # verify trash dir is clean
        self._wait_for_trash_empty(timeout=300)
    def test_default_uid_gid_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        expected_uid = 0
        expected_gid = 0

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # check subvolume's uid and gid
        stat = self.mount_a.stat(subvol_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_nonexistent_subvolume_rm(self):
        # remove a non-existent subvolume
        subvolume = "non_existent_subvolume"

        # try removing the subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume rm' command to fail")
    def test_subvolume_create_and_rm(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # make sure it exists
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        self.assertNotEqual(subvolpath, None)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        # make sure it's gone
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_and_rm_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_create_idempotence(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name -- should be idempotent
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_idempotence_resize(self):
        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # try creating w/ same subvolume name with size -- should set quota
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertEqual(subvol_info["bytes_quota"], 1000000000)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_isolated_namespace(self):
        """
        Create a subvolume in a separate RADOS namespace.
        """

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        self.assertNotEqual(len(subvol_info), 0)
        self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_auto_cleanup_on_fail(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # creating a subvolume with an invalid data pool layout fails
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

        # check whether subvol path is cleaned up
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
        else:
            self.fail("expected the 'fs subvolume getpath' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
        subvol1, subvol2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group. this also helps set default pool layout for subvolumes
        # created within the group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

        default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume specifying the new data pool as its pool layout
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                     "--pool_layout", new_pool)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

        desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns the pool id

        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_mode(self):
        subvol1 = self._generate_random_subvolume_name()

        # default mode
        default_mode = "755"
        # desired mode
        desired_mode = "777"

        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1)

        # check subvolumegroup's mode
        subvol_par_path = os.path.dirname(subvol1_path)
        group_path = os.path.dirname(subvol_par_path)
        actual_mode1 = self.mount_a.run_shell(['stat', '-c%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, default_mode)
        # check /volumes mode
        volumes_path = os.path.dirname(group_path)
        actual_mode2 = self.mount_a.run_shell(['stat', '-c%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode2, default_mode)
        # check subvolume's mode
        actual_mode3 = self.mount_a.run_shell(['stat', '-c%a', subvol1_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode3, desired_mode)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_mode_in_group(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)

        group = self._generate_random_group_name()
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
        # check whether mode 0777 also works
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
        subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

        # check subvolume modes
        actual_mode1 = self.mount_a.run_shell(['stat', '-c%a', subvol1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c%a', subvol2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c%a', subvol3_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode2)

        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_desired_uid_gid(self):
        """
        That the subvolume can be created with the desired uid and gid, and
        that its uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c%u', subvolpath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c%g', subvolpath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_invalid_data_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        data_pool = "invalid_pool"
        # create subvolume with invalid data pool layout
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_create_with_invalid_size(self):
        # create subvolume with an invalid size -1
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume create' command to fail")

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_expand(self):
        """
        That a subvolume can be expanded in size and its quota matches the expected size.
        """

        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # expand the subvolume
        nsize = osize*2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_info(self):
        # tests the 'fs subvolume info' command

        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

        # get subvolume metadata after quota set
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
        self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
        self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

        self.assertEqual(len(subvol_info["features"]), 3,
                         msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_ls(self):
        # tests the 'fs subvolume ls' command

        # create subvolumes
        subvolumes = self._generate_random_subvolume_name(3)
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # list subvolumes
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        if len(subvolumels) == 0:
            self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
        else:
            subvolnames = [subvolume['name'] for subvolume in subvolumels]
            if collections.Counter(subvolnames) != collections.Counter(subvolumes):
                self.fail("Error creating or listing subvolumes")

        # remove subvolumes
        for subvolume in subvolumes:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_ls_for_notexistent_default_group(self):
        # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
        # prerequisite: we expect the volume to exist and the default group _nogroup
        # to NOT be created (i.e. no subvolume without a group has been created)

        # list subvolumes
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        if len(subvolumels) > 0:
            raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
    def test_subvolume_marked(self):
        """
        Ensure a subvolume is marked with the ceph.dir.subvolume xattr.
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)

        # a subdirectory of a subvolume cannot be moved outside the subvolume
        # once it is marked with the xattr ceph.dir.subvolume; hence, test by
        # attempting to rename the subvol path (incarnation) outside the
        # subvolume
        dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
        srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
        rename_script = dedent("""
            import os
            import errno
            try:
                os.rename("{src}", "{dst}")
            except OSError as e:
                if e.errno != errno.EXDEV:
                    raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
            else:
                raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
            """)
        self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    def test_subvolume_pin_export(self):
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
        path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        path = os.path.dirname(path)  # get subvolume path

        self._get_subtrees(status=status, rank=1)
        self._wait_subtrees([(path, 1)], status=status)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()
    ### authorize operations

    def test_authorize_deauthorize_legacy_subvolume(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # emulate an old-fashioned subvolume in a custom group
        createpath = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        mount_path = os.path.join("/", "volumes", group, subvolume)

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1279 def test_authorize_deauthorize_subvolume(self):
1280 subvolume = self._generate_random_subvolume_name()
1281 group = self._generate_random_group_name()
1282 authid = "alice"
1283
1284 guest_mount = self.mount_b
1285 guest_mount.umount_wait()
1286
1287 # create group
522d829b 1288 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")
cd265ab1
TL
1289
1290 # create subvolume in group
1291 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1292 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
1293 "--group_name", group).rstrip()
1294
1295 # authorize guest authID read-write access to subvolume
1296 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
1297 "--group_name", group, "--tenant_id", "tenant_id")
1298
1299 # guest authID should exist
1300 existing_ids = [a['entity'] for a in self.auth_list()]
1301 self.assertIn("client.{0}".format(authid), existing_ids)
1302
1303 # configure credentials for guest client
1304 self._configure_guest_auth(guest_mount, authid, key)
1305
1306 # mount the subvolume, and write to it
522d829b 1307 guest_mount.mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
1308 guest_mount.write_n_mb("data.bin", 1)
1309
1310 # authorize guest authID read access to subvolume
1311 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
1312 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
1313
1314 # guest client sees the change in access level to read only after a
1315 # remount of the subvolume.
1316 guest_mount.umount_wait()
522d829b 1317 guest_mount.mount_wait(cephfs_mntpt=mount_path)
cd265ab1
TL
1318
1319 # read existing content of the subvolume
1320 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
1321 # cannot write into read-only subvolume
1322 with self.assertRaises(CommandFailedError):
1323 guest_mount.write_n_mb("rogue.bin", 1)
1324
1325 # cleanup
1326 guest_mount.umount_wait()
1327 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
1328 "--group_name", group)
1329 # guest authID should no longer exist
1330 existing_ids = [a['entity'] for a in self.auth_list()]
1331 self.assertNotIn("client.{0}".format(authid), existing_ids)
1332 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1333 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1334

    def test_multitenant_subvolumes(self):
        """
        That subvolume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        subvolumes is stored as a two-way mapping between auth
        IDs and subvolumes that they're authorized to access.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        guest_mount = self.mount_b

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "alice"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }
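
        # The two-way mapping referred to in the docstring lives in two kinds
        # of files at the volume root (the filenames are built below; this is
        # an illustrative sketch of their roles):
        #
        #   $alice.meta             auth ID -> subvolumes it may access
        #   _<group>:<subvol>.meta  subvolume -> auth IDs allowed on it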

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Check that the subvolume metadata file is created on subvolume creation.
        subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
        self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'alice' is
        # created on authorizing 'alice' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different subvolumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group, subvolume): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the subvolume metadata file stores info about auth IDs
        # and their access levels to the subvolume, versioning details, etc.
        expected_subvol_metadata = {
            "version": 1,
            "compat_version": 1,
            "auths": {
                "alice": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }
        subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))

        self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
        del expected_subvol_metadata["version"]
        del subvol_metadata["version"]
        self.assertEqual(expected_subvol_metadata, subvol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'alice', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Check that the subvolume metadata file is cleaned up on subvolume deletion.
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # clean up
        guest_mount.umount_wait()
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_authorized_list(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid1 = "alice"
        authid2 = "guest1"
        authid3 = "guest2"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # authorize alice authID read-write access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
                     "--group_name", group)
        # authorize guest1 authID read-write access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
                     "--group_name", group)
        # authorize guest2 authID read access to subvolume
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
                     "--group_name", group, "--access_level", "r")

        # list authorized-ids of the subvolume
        expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
        auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
        self.assertCountEqual(expected_auth_list, auth_list)

        # cleanup
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
                     "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
                     "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
                     "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_authorize_auth_id_not_created_by_mgr_volumes(self):
        """
        If the auth_id already exists and was not created by the mgr plugin,
        authorizing it is not allowed by default.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # Create the auth_id out of band
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume for auth_id created out of band")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # clean up
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_authorize_allow_existing_id_option(self):
        """
        If the auth_id already exists and was not created by mgr/volumes,
        authorizing it is not allowed by default, but is allowed with the
        option --allow-existing-id.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # Create the auth_id out of band
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # By default 'guestclient_1' cannot be authorized, since its auth ID
        # already exists and was not created by mgr/volumes; the
        # '--allow-existing-id' option overrides that check.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_deauthorize_auth_id_after_out_of_band_update(self):
        """
        If the auth_id authorized by the mgr/volumes plugin is updated
        out of band, the auth_id should not be deleted after a
        deauthorize; only the caps associated with it should be removed.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                   "--group_name", group).rstrip()

        # Update caps for guestclient_1 out of band
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "caps", "client.guest1",
            "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
            "osd", "allow rw pool=cephfs_data",
            "mon", "allow r",
            "mgr", "allow *"
        )
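
        # After this out-of-band "auth caps", client.guest1 carries (sketch):
        #   mds: allow rw path=/volumes/<group>, allow rw path=<subvol_path>
        #   osd: allow rw pool=cephfs_data
        #   mon: allow r
        #   mgr: allow *
        # Deauthorize should peel off only the caps mgr/volumes recognizes as
        # its own (the subvolume path cap and the osd cap), which is what the
        # assertions below verify.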

        # Deauthorize guestclient_1
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)

        # Validate the caps of guestclient_1 after deauthorize. The auth ID
        # itself should not have been deleted, and the mgr and mds caps that
        # were updated out of band should still be present.
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))

        self.assertEqual("client.guest1", out[0]["entity"])
        self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
        self.assertEqual("allow *", out[0]["caps"]["mgr"])
        self.assertNotIn("osd", out[0]["caps"])

        # clean up
        out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_recover_auth_metadata_during_authorize(self):
        """
        That the auth metadata manager can recover from partial auth updates
        using metadata files, which store auth info and its update status.
        This test validates the recovery during authorize.
        """
        guest_mount = self.mount_b

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is
        # created on authorizing 'guest1' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run authorize again.
        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
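        # The sed flips every '"dirty": false' marker in the metadata file to
        # true, mimicking an authorize that crashed mid-update; the next
        # authorize should notice the dirty markers, redo the pending work and
        # write the file back clean, which the equality check below verifies.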

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_recover_auth_metadata_during_deauthorize(self):
        """
        That the auth metadata manager can recover from partial auth updates
        using metadata files, which store auth info and its update status.
        This test validates the recovery during deauthorize.
        """
        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        guestclient_1 = {
            "auth_id": "guest1",
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is
        # created on authorizing 'guest1' access to subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Authorize 'guestclient_1' to access subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run deauthorize.
        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Deauthorize 'guestclient_1's access to subvolume2.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group)

        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_update_old_style_auth_metadata_to_new_during_authorize(self):
        """
        CephVolumeClient stores subvolume data in the auth metadata file under
        a 'volumes' key, as there was no subvolume namespace then. That key
        doesn't make sense with mgr/volumes. This test validates the
        transparent update of the 'volumes' key to 'subvolumes' in the auth
        metadata file during authorize.
        """
        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is
        # created on authorizing 'guest1' access to subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' with 'volumes', emulating an old-style
        # auth-metadata file
        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
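        # Roughly, the file now looks like the pre-mgr/volumes format
        # (illustrative sketch of the relevant key only):
        #   {... "volumes": {"<group>/<subvol1>": {...}} ...}
        # and the next authorize is expected to rewrite it under "subvolumes".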

        # Authorize 'guestclient_1' to access subvolume2. This should
        # transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group, subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                },
                "{0}/{1}".format(group, subvolume2): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
        """
        CephVolumeClient stores subvolume data in the auth metadata file under
        a 'volumes' key, as there was no subvolume namespace then. That key
        doesn't make sense with mgr/volumes. This test validates the
        transparent update of the 'volumes' key to 'subvolumes' in the auth
        metadata file during deauthorize.
        """
        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Authorize 'guestclient_1' to access subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that the auth metadata file for auth ID 'guest1' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' with 'volumes', emulating an old-style
        # auth-metadata file
        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)

        # Deauthorize 'guestclient_1's access to subvolume2. This should
        # transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)

        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group, subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_evict_client(self):
        """
        That a subvolume client can be evicted based on the auth ID
        """
        subvolumes = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # mounts[0] and mounts[1] are used as guests to mount the subvolumes
        for i in range(0, 2):
            self.mounts[i].umount_wait()
        guest_mounts = (self.mounts[0], self.mounts[1])
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create two subvolumes. Authorize the 'guest' auth ID to mount both
        # subvolumes. Mount the two subvolumes. Write data to the subvolumes.
        for i in range(2):
            # Create subvolume.
            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")

            # authorize guest authID read-write access to subvolume
            key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
                               "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

            mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
                                      "--group_name", group).rstrip()
            # configure credentials for guest client
            self._configure_guest_auth(guest_mounts[i], auth_id, key)

            # mount the subvolume, and write to it
            guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which uses auth ID 'guest' and has mounted
        # the first subvolume.
        self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blocklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blocklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], which uses the same auth ID 'guest'
        # but has mounted the other subvolume, should be able to use its
        # subvolume unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        guest_mounts[1].umount_wait()
        for i in range(2):
            self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
            self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_pin_random(self):
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()
        self.config_set('mds', 'mds_export_ephemeral_random', True)

        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
        # no verification
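        # ('random .01' sets the ephemeral random-pin probability on the
        # subvolume directory, giving each descendant directory a 1% chance
        # of being ephemerally pinned to a random active MDS, so there is
        # nothing deterministic to assert here.)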

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_fail_invalid_size(self):
        """
        That a subvolume cannot be resized to an invalid size and the quota did not change
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with an invalid size -10
        nsize = -10
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_fail_zero_size(self):
        """
        That a subvolume cannot be resized to a zero size and the quota did not change
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # try to resize the subvolume with size 0
        nsize = 0
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with zero size")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_quota_lt_used_size(self):
        """
        That a subvolume can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(self.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        if isinstance(self.mount_a, FuseMount):
            # kclient dir size does not equal rbytes, so only check on FUSE
            self.assertEqual(usedsize, susedsize)
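
        # ceph.dir.rbytes is the recursive byte count of everything under the
        # directory; ceph-fuse exposes it as the directory's st_size (hence
        # the stat cross-check above), while the kernel client reports a
        # conventional directory size.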

        # shrink the subvolume
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
        except CommandFailedError:
            self.fail("expected the 'fs subvolume resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
        """
        That a subvolume cannot be resized to a size smaller than the current used size
        when --no_shrink is given and the quota did not change.
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(self.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
        susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
        if isinstance(self.mount_a, FuseMount):
            # kclient dir size does not equal rbytes, so only check on FUSE
            self.assertEqual(usedsize, susedsize)

        # attempt to shrink the subvolume with --no_shrink
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on shrink of subvolume with --no_shrink")
        else:
            self.fail("expected the 'fs subvolume resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_expand_on_full_subvolume(self):
        """
        That a subvolume can be expanded after it becomes full, and that future writes succeed.
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*10
        # create subvolume of quota 10MB and make sure it exists
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # create one file of size 10MB and write
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(self.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # create a file of size 5MB and try to write more
        file_size = file_size // 2
        number_of_files = 1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(self.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            # Could not write, as expected. Expand the subvolume and try
            # writing the 5MB file again.
            nsize = osize*2
            self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
            try:
                self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                          "to succeed".format(subvolname, number_of_files, file_size))
        else:
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                      "to fail".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_infinite_size(self):
        """
        That a subvolume can be resized to an infinite size by unsetting its quota.
        """
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
                     str(self.DEFAULT_FILE_SIZE*1024*1024))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # resize inf
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")

        # verify that the quota is None
        size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)
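
        # "inf" removes the byte quota altogether: with no
        # ceph.quota.max_bytes vxattr set on the directory, getfattr returns
        # None rather than a number, which is what is asserted above.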

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_resize_infinite_size_future_writes(self):
        """
        That a subvolume can be resized to an infinite size and future writes succeed.
        """
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
                     str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # resize inf
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")

        # verify that the quota is None
        size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # create one file of 10MB and try to write
        file_size = self.DEFAULT_FILE_SIZE*10
        number_of_files = 1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        filename = "{0}.{1}".format(self.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)

        try:
            self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
                      "to succeed".format(subvolname, number_of_files, file_size))

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_force(self):
        # test removing a non-existent subvolume with --force
        subvolume = self._generate_random_subvolume_name()
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm --force' command to succeed")

    def test_subvolume_shrink(self):
        """
        That a subvolume can be shrunk in size and its quota matches the expected size.
        """
        # create subvolume
        subvolname = self._generate_random_subvolume_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname)
        self.assertNotEqual(subvolpath, None)

        # shrink the subvolume
        nsize = osize // 2
        self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

        # verify the quota
        size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)

        # verify trash dir is clean
        self._wait_for_trash_empty()


class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove snapshot again; should fail with ENOENT
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating a snapshot w/ the same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

2381 @unittest.skip("skipping subvolumegroup snapshot tests")
2382 def test_subvolume_group_snapshot_ls(self):
2383 # tests the 'fs subvolumegroup snapshot ls' command
eafe8130
TL
2384
2385 snapshots = []
2386
f67539c2
TL
2387 # create group
2388 group = self._generate_random_group_name()
2389 self._fs_cmd("subvolumegroup", "create", self.volname, group)
eafe8130 2390
f67539c2 2391 # create subvolumegroup snapshots
92f5a8d4
TL
2392 snapshots = self._generate_random_snapshot_name(3)
2393 for snapshot in snapshots:
f67539c2 2394 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
eafe8130 2395
f67539c2
TL
2396 subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
2397 if len(subvolgrpsnapshotls) == 0:
2398 raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
eafe8130 2399 else:
f67539c2 2400 snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
eafe8130 2401 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
f67539c2 2402 raise RuntimeError("Error creating or listing subvolume group snapshots")
adb31ebb 2403
f67539c2
TL
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        # test removing a non-existent subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        # remove snapshot
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot group
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)


class TestSubvolumeSnapshots(TestVolumesHelper):
    """Tests for FS subvolume snapshot operations."""
    def test_nonexistent_subvolume_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove snapshot again; should fail with ENOENT
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_and_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_create_idempotence(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # try creating w/ same subvolume snapshot name -- should be idempotent
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_info(self):
        """
        tests the 'fs subvolume snapshot info' command
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot, snap_missing = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")
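
        # A sketch of the JSON this asserts against (field values are
        # illustrative; only the keys and has_pending_clones are checked):
        #   {
        #       "created_at": "2021-12-07 09:11:56.081014",
        #       "data_pool": "<data pool name>",
        #       "has_pending_clones": "no",
        #       "size": 1048576
        #   }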

        # snapshot info for non-existent snapshot
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
        else:
            self.fail("expected snapshot info of non-existent snapshot to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_in_group(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot subvolume in group
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_snapshot_ls(self):
        # tests the 'fs subvolume snapshot ls' command

        snapshots = []

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        if len(subvolsnapshotls) == 0:
            self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                self.fail("Error creating or listing subvolume snapshots")

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_inherited_snapshot_ls(self):
        # tests the scenario where the 'fs subvolume snapshot ls' command
        # should not list inherited snapshots created as part of a snapshot
        # at the ancestral level

        snapshots = []
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snap_count = 3

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # create subvolume snapshots
        snapshots = self._generate_random_snapshot_name(snap_count)
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # Create snapshots at the ancestral level
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
        ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)

        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
        self.assertEqual(len(subvolsnapshotls), snap_count)

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)

        # remove snapshots
        for snapshot in snapshots:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_inherited_snapshot_info(self):
        """
        tests the scenario where 'fs subvolume snapshot info' command
        should fail for inherited snapshots created as part of snapshot
        at ancestral level
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create snapshot at ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # Validate existence of inherited snapshot
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # snapshot info on inherited snapshot
        try:
            self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
        else:
            self.fail("expected snapshot info of inherited snapshot to fail")

        # remove ancestral snapshots
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

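    # Background for the inherited-snapshot tests: a snapshot taken on an
    # ancestor directory (here, the subvolume group) is surfaced inside each
    # descendant's .snap directory under the name "_<snapname>_<ancestor inode>",
    # which is how 'inherited_snap' is computed above. For example, a group
    # snapshot "ancestral_snap_1" on a group directory with inode 1099 would
    # appear inside the subvolume as ".snap/_ancestral_snap_1_1099" (inode
    # value illustrative).
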
    def test_subvolume_inherited_snapshot_rm(self):
        """
        tests the scenario where 'fs subvolume snapshot rm' command
        should fail for inherited snapshots created as part of snapshot
        at ancestral level
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create snapshot at ancestral level
        ancestral_snap_name = "ancestral_snap_1"
        ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)

        # Validate existence of inherited snap
        group_path = os.path.join(".", "volumes", group)
        inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
        inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
        inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
        self.mount_a.run_shell(['ls', inherited_snappath])

        # inherited snapshot should not be deletable
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
        else:
            self.fail("expected removing inherited snapshot to fail")

        # remove ancestral snapshot
        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
        """
        tests that creating a subvolume snapshot with the same name as an
        existing subvolumegroup snapshot fails
        """

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        group_snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Create subvolumegroup snapshot
        group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)

        # Validate existence of subvolumegroup snapshot
        self.mount_a.run_shell(['ls', group_snapshot_path])

        # Creation of subvolume snapshot with its subvolumegroup snapshot name should fail
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
        else:
            self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")

        # remove subvolumegroup snapshot
        self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_retain_snapshot_invalid_recreate(self):
        """
        ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume with an invalid pool
        data_pool = "invalid_pool"
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
        else:
            self.fail("expected recreate of subvolume with invalid poolname to fail")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify trash dir is clean
        self._wait_for_trash_empty()

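    # For reference, the retention flow exercised above maps to the CLI roughly
    # as (names are placeholders):
    #   ceph fs subvolume rm <vol> <subvol> --retain-snapshots
    #   ceph fs subvolume info <vol> <subvol>    # reports "state": "snapshot-retained"
    # A retained subvolume keeps only its snapshots -- its data incarnation is
    # gone -- so path-returning operations such as 'getpath' fail with ENOENT.
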
    def test_subvolume_retain_snapshot_recreate_subvolume(self):
        """
        ensure a retained subvolume can be recreated and further snapshotted
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        # recreate retained subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))

        # snapshot info (older snapshot)
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-create (new snapshot)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with retain snapshots
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # list snapshots
        subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
        for snap in [snapshot1, snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        # remove snapshots (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_with_snapshots(self):
        """
        ensure retain-snapshots based delete of a subvolume with snapshots retains the subvolume
        also test allowed and disallowed operations on a retained subvolume
        """
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fetch info
        subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
        self.assertEqual(subvol_info["state"], "snapshot-retained",
                         msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))

        ## test allowed ops in retained state
        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # snapshot info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # rm --force (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        # rm (allowed but should fail)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
        else:
            self.fail("expected rm of subvolume with retained snapshots to fail")

        ## test disallowed ops
        # getpath
        try:
            self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
        else:
            self.fail("expected getpath of subvolume with retained snapshots to fail")

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
        else:
            self.fail("expected resize of subvolume with retained snapshots to fail")

        # snap-create
        try:
            self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
        else:
            self.fail("expected snapshot create of subvolume with retained snapshots to fail")

        # remove snapshot (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_without_snapshots(self):
        """
        ensure retain-snapshots based delete of a subvolume with no snapshots deletes the subvolume
        """
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove with snapshot retention (should remove volume, no snapshots to retain)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate(self):
        """
        ensure retained subvolume recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(subvolume)

        # recreate subvolume
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
        else:
            self.fail("expected recreate of subvolume with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(subvolume, create=False)

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_rm_with_snapshots(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTEMPTY:
                raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
        else:
            raise RuntimeError("expected subvolume deletion to fail")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_protect_unprotect_sanity(self):
        """
        Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
        invoking the commands does not cause errors, until they are removed in a subsequent release.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # now, protect snapshot
        self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # now, unprotect snapshot
        self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

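    # Note: 'snapshot protect'/'snapshot unprotect' were required before cloning
    # in older releases; they are retained as deprecated, effectively no-op
    # commands for backward compatibility, which is all the sanity test above
    # asserts. The equivalent CLI calls are roughly:
    #   ceph fs subvolume snapshot protect <vol> <subvol> <snap>
    #   ceph fs subvolume snapshot unprotect <vol> <subvol> <snap>
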
    def test_subvolume_snapshot_rm_force(self):
        # test removing a non-existent subvolume snapshot with --force
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")


class TestSubvolumeSnapshotClones(TestVolumesHelper):
    """Tests for FS subvolume snapshot clone operations."""
    def test_clone_subvolume_info(self):
        # tests the 'fs subvolume info' command for a clone
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

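    # For reference, 'fs subvolume info' on a clone is expected to report
    # "type": "clone" (a regular subvolume reports "type": "subvolume"); the
    # remaining keys checked above ("atime", "bytes_quota", "data_pool", ...)
    # are common to both.
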
    def test_non_clone_status(self):
        subvolume = self._generate_random_subvolume_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        try:
            self._fs_cmd("clone", "status", self.volname, subvolume)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOTSUP:
                raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
        else:
            raise RuntimeError("expected fetching of clone status of a subvolume to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        osize = self.DEFAULT_FILE_SIZE*1024*1024*12

        # create subvolume, in an isolated namespace with a specified size
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create a pool different from current subvolume pool
        subvol_path = self._get_subvolume_path(self.volname, subvolume)
        default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)
        self.fs.add_data_pool(new_pool)

        # update source subvolume pool
        self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")

        # schedule a clone, with NO --pool specification
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_in_progress_getpath(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # clone should not be accessible right now
        try:
            self._get_subvolume_path(self.volname, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when fetching path of a pending clone")
        else:
            raise RuntimeError("expected fetching path of a pending clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

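    # Note: 'mgr/volumes/snapshot_clone_delay' (set via self.config_set above,
    # roughly equivalent to 'ceph config set mgr mgr/volumes/snapshot_clone_delay 2')
    # delays the start of each clone by the given number of seconds, keeping the
    # clone in a pending/in-progress state long enough for these tests to probe it.
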
    def test_subvolume_clone_in_progress_snapshot_rm(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_clone_in_progress_source(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # verify clone source
        result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
        source = result['status']['source']
        self.assertEqual(source['volume'], self.volname)
        self.assertEqual(source['subvolume'], subvolume)
        self.assertEqual(source.get('group', None), None)
        self.assertEqual(source['snapshot'], snapshot)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # clone should be accessible now
        subvolpath = self._get_subvolume_path(self.volname, clone)
        self.assertNotEqual(subvolpath, None)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

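    # For reference, 'ceph fs clone status' returns JSON shaped like the
    # fields read above (values illustrative):
    #   {"status": {"state": "in-progress",
    #               "source": {"volume": "<vol>", "subvolume": "<subvol>",
    #                          "snapshot": "<snap>"}}}
    # with an optional "group" key under "source" when the source subvolume
    # lives in a non-default group.
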
    def test_subvolume_clone_retain_snapshot_with_snapshots(self):
        """
        retain snapshots of a cloned subvolume and check disallowed operations
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol1_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)

        # create a snapshot on the clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)

        # retain a clone
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # list snapshots
        clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
        self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
                         " created subvolume snapshots")
        snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
        for snap in [snapshot2]:
            self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))

        ## check disallowed operations on retained clone
        # clone-status
        try:
            self._fs_cmd("clone", "status", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
        else:
            self.fail("expected clone status of clone with retained snapshots to fail")

        # clone-cancel
        try:
            self._fs_cmd("clone", "cancel", self.volname, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
        else:
            self.fail("expected clone cancel of clone with retained snapshots to fail")

        # remove snapshots (removes subvolumes as all are in retained state)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone(self):
        """
        clone a snapshot from a snapshot retained subvolume
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)

        # remove snapshots (removes retained volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
        """
        clone a subvolume from a recreated subvolume's latest snapshot
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
        clone = self._generate_random_clone_name(1)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # get and store path for clone verification
        subvol2_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot newer subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # clone retained subvolume's newer snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_recreate(self):
        """
        recreate a subvolume from one of its retained snapshots
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # store path for clone verification
        subvol_path = self._get_subvolume_path(self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=16)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")

        # recreate retained subvolume using its own snapshot to clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)

        # check clone status
        self._wait_for_clone_to_complete(subvolume)

        # verify clone
        self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
        """
        ensure retained clone recreate fails if its trash is not yet purged
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone subvolume snapshot
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # snapshot clone
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)

        # remove clone with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")

        # fake a trash entry
        self._update_fake_trash(clone)

        # clone subvolume snapshot (recreate)
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
        else:
            self.fail("expected recreate of clone with purge pending to fail")

        # clear fake trash entry
        self._update_fake_trash(clone, create=False)

        # recreate the clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_attr_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io_mixed(subvolume)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_retain_suid_guid(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # Create a file with setuid and setgid bits set along with the executable bit.
        args = ["subvolume", "getpath", self.volname, subvolume]
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        file_path = os.path.join(subvolpath, "test_suid_file")
        self.mount_a.run_shell(["touch", file_path])
        self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

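    # A quick aside on the mode bits exercised above: 'chmod u+sx,g+sx' adds
    # the setuid (04000) and setgid (02000) bits plus owner/group execute
    # (00100/00010) to the file's existing mode, and the clone verification
    # compares the full stat mode, so these special bits must be carried
    # through to the clone verbatim.
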
    def test_subvolume_snapshot_clone_and_reclone(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # now the clone is just like a normal subvolume -- snapshot the clone and fork
        # another clone. before that, do some IO so it can be differentiated.
        self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)

        # snapshot clone -- use same snap name
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # verify clone
        self._verify_clone(clone1, snapshot, clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

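    # Design note grounded in the test above: a completed clone is a full,
    # independent copy -- once its state is 'complete' it behaves like any
    # other subvolume, so it can take snapshots and serve as the source for
    # further clones, and removing the original source snapshot does not
    # affect it.
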
    def test_subvolume_snapshot_clone_cancel_in_progress(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=128)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # Insert delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # cancel on-going clone
        self._fs_cmd("clone", "cancel", self.volname, clone)

        # verify canceled state
        self._check_clone_canceled(clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

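    # Note: a canceled clone is left in the 'canceled' state rather than being
    # cleaned up automatically, which is why the test above (and the one below)
    # removes such clones with 'subvolume rm ... --force'.
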
    def test_subvolume_snapshot_clone_cancel_pending(self):
        """
        this test is a bit more involved compared to canceling an in-progress clone.
        we'd need to ensure that a to-be canceled clone has still not been picked up
        by cloner threads. exploit the fact that clones are picked up in an FCFS
        fashion and there are four (4) cloner threads by default. When the number of
        cloner threads increases, this test _may_ start tripping -- so, the number of
        clone operations would need to be jacked up.
        """
        # default number of clone threads
        NR_THREADS = 4
        # good enough for 4 threads
        NR_CLONES = 5
        # yes, 1GiB -- we need the clone to run for some time
        FILE_SIZE_MB = 1024

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clones = self._generate_random_clone_name(NR_CLONES)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule clones
        for clone in clones:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        to_wait = clones[0:NR_THREADS]
        to_cancel = clones[NR_THREADS:]

        # cancel pending clones and verify
        for clone in to_cancel:
            status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
            self.assertEqual(status["status"]["state"], "pending")
            self._fs_cmd("clone", "cancel", self.volname, clone)
            self._check_clone_canceled(clone)

        # let's cancel on-going clones. handle the case where some of the clones
        # _just_ complete
        for clone in list(to_wait):
            try:
                self._fs_cmd("clone", "cancel", self.volname, clone)
                to_cancel.append(clone)
                to_wait.remove(clone)
            except CommandFailedError as ce:
                if ce.exitstatus != errno.EINVAL:
                    raise RuntimeError("invalid error code when cancelling on-going clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in to_wait:
            self._fs_cmd("subvolume", "rm", self.volname, clone)
        for clone in to_cancel:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_different_groups(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        s_group, c_group = self._generate_random_group_name(2)

        # create groups
        self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "create", self.volname, c_group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
                     '--group_name', s_group, '--target_group_name', c_group)
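        # (the call above corresponds to the CLI:
        #   ceph fs subvolume snapshot clone <vol> <subvol> <snap> <clone> \
        #       --group_name <src_group> --target_group_name <dst_group>)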

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=c_group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
        self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)

        # remove groups
        self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_fail_with_remove(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)

        pool_capacity = 32 * 1024 * 1024
        # number of files required to fill up 99% of the pool
        nr_files = int((pool_capacity * 0.99) / (self.DEFAULT_FILE_SIZE * 1024 * 1024))
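        # (with the 1 MiB DEFAULT_FILE_SIZE this works out to ~31 files, i.e.
        # roughly the full 32 MiB "pool capacity" worth of data)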

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=nr_files)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # add data pool
        new_pool = "new_pool"
        self.fs.add_data_pool(new_pool)

        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
                                            "max_bytes", "{0}".format(pool_capacity // 4))

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)

        # check clone status -- this should dramatically overshoot the pool quota
        self._wait_for_clone_to_complete(clone1)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)

        # wait a bit so that subsequent I/O will give pool full error
        time.sleep(120)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_fail(clone2)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, clone2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EAGAIN:
                raise RuntimeError("invalid error code when trying to remove failed clone")
        else:
            raise RuntimeError("expected error when removing a failed clone")

        # ... and with force, a failed clone can be removed
        self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolumes
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume1, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)

        # schedule a clone with target as subvolume2
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing subvolume")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing subvolume")

        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)

        # schedule a clone with target as the newly scheduled clone
        try:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EEXIST:
                raise RuntimeError("invalid error code when cloning to existing clone")
        else:
            raise RuntimeError("expected cloning to fail if the target is an existing clone")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume1, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_pool_layout(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # add data pool
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)
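        # (keep the pool id around -- older kernels report the layout pool by
        # id rather than by name, which the getfattr check below accounts for)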

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_under_group(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        group = self._generate_random_group_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_with_attrs(self):
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        mode = "777"
        uid = "1000"
        gid = "1000"
        new_uid = "1001"
        new_gid = "1001"
        new_mode = "700"
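        # (the clone is expected to carry the snapshot-time attrs -- mode/uid/gid
        # of 777/1000/1000 -- not the updated values applied after the snapshot)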

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # change subvolume attrs (to ensure clone picks up snapshot attrs)
        self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        Yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate an old-style subvolume by going through the wormhole
        and verify the clone operation.
        Further ensure that the legacy subvolume is not updated to v2, but the clone is.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate an old-fashioned subvolume
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
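        # (a bare directory under /volumes plus a layout xattr is what a
        # legacy, pre-versioning subvolume looks like on disk)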

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # insert a delay at the beginning of snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
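        # (the delay keeps the clone in flight long enough to exercise the
        # snapshot-removal guard below)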

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
        """
        Validate 'max_concurrent_clones' config option
        """

        # get the default number of cloner threads
        default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(default_max_concurrent_clones, 4)

        # Increase number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 6)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

    def test_subvolume_snapshot_config_snapshot_clone_delay(self):
        """
        Validate 'snapshot_clone_delay' config option
        """

        # get the default delay before starting the clone
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 0)

        # Insert delay of 2 seconds at the beginning of the snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 2)

        # Decrease number of cloner threads
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)

    def test_subvolume_under_group_snapshot_clone(self):
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()


class TestMisc(TestVolumesHelper):
    """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)

    def test_mgr_eviction(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])

    def test_names_can_only_be_goodchars(self):
        """
        Test that creating volumes, subvolumes, and subvolume groups fails when
        their names use characters beyond [a-zA-Z0-9 -_.].
        """
        volname, badname = 'testvol', 'abcd@#'

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')

    def test_subvolume_ops_on_nonexistent_vol(self):
        # tests the fs subvolume operations on a non-existent volume

        volname = "non_existent_subvolume"

        # try subvolume operations
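        # (every op should fail with ENOENT since the volume does not exist)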
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))

    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        Poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify that they are
        accessible.
        Further ensure that a legacy volume is not updated to v2.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate old-fashioned subvolumes -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

        # create the second one in a custom group (mkdir -p creates the group too)
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove any trailing newline

        # and... the subvolume path returned should be what we created behind the scenes
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)
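        # (createpath[1:] strips the leading "." so that the comparison is
        # against the cluster-root-relative path that getpath returns)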

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_no_upgrade_v1_sanity(self):
        """
        Poor man's upgrade test -- theme continues...

        This test ensures that v1 subvolumes are retained as-is, due to a snapshot
        being present, and runs through a series of operations on the v1 subvolume
        to ensure they work as expected.
        """
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]
        snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)
        mode = "777"
        uid = "1000"
        gid = "1000"

        # emulate a v1 subvolume -- in the default group
        subvolume_path = self._create_v1_subvolume(subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.assertEqual(subvolpath, subvolume_path)

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # info
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
        self.assertEqual(len(subvol_info["features"]), 2,
                         msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snap-create
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone1, version=2)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, source_version=1)

        # clone (older snapshot)
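        # ('fake' is the pre-existing snapshot baked into the emulated v1
        # subvolume by _create_v1_subvolume)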
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone2, version=2)

        # verify clone
        # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
        #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

        # snap-info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-ls
        subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvol_snapshots), 2, "subvolume snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
        snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
        for name in [snapshot, 'fake']:
            self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

        # snap-rm
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

        # ensure the subvolume is still at version 1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1)

        # rm
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        Poor man's upgrade test -- theme continues...
        Ensure that v1 to v2 upgrades are not done automatically, owing to the
        various states a v1 subvolume can be in.
        """
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as the subvolume is not complete;
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshots
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_upgrade_v1_to_v2(self):
        """
        Poor man's upgrade test -- theme continues...
        Ensure that v1 to v2 upgrades work.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version upgraded to v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()