]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_volumes.py
add stop-gap to fix compat with CPUs not supporting SSE 4.1
[ceph.git] / ceph / qa / tasks / cephfs / test_volumes.py
1 import os
2 import json
3 import time
4 import errno
5 import random
6 import logging
7 import collections
8 import uuid
9 import unittest
10 from hashlib import md5
11 from textwrap import dedent
12 from io import StringIO
13
14 from tasks.cephfs.cephfs_test_case import CephFSTestCase
15 from tasks.cephfs.fuse_mount import FuseMount
16 from teuthology.exceptions import CommandFailedError
17
18 log = logging.getLogger(__name__)
19
class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    # prefixes used to build unique names for entities created by the tests
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        """Run 'ceph fs <args...>' via the mon manager and return its output."""
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)

    def _raw_cmd(self, *args):
        """Run a raw 'ceph <args...>' cluster command and return its output."""
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
42
43 def __check_clone_state(self, state, clone, clone_group=None, timo=120):
44 check = 0
45 args = ["clone", "status", self.volname, clone]
46 if clone_group:
47 args.append(clone_group)
48 args = tuple(args)
49 while check < timo:
50 result = json.loads(self._fs_cmd(*args))
51 if result["status"]["state"] == state:
52 break
53 check += 1
54 time.sleep(1)
55 self.assertTrue(check < timo)
56
    def _get_clone_status(self, clone, clone_group=None):
        """Return the parsed JSON output of 'fs clone status' for *clone*."""
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        result = json.loads(self._fs_cmd(*args))
        return result

    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        # block until the clone state becomes "complete" (asserts after timo polls)
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        # block until the clone state becomes "failed"
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _wait_for_clone_to_be_in_progress(self, clone, clone_group=None, timo=120):
        # block until the clone state becomes "in-progress"
        self.__check_clone_state("in-progress", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        # single-shot check (timo=1): the clone must already be "canceled"
        self.__check_clone_state("canceled", clone, clone_group, timo=1)
76
77 def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
78 if source_version == 2:
79 # v2
80 if subvol_path is not None:
81 (base_path, uuid_str) = os.path.split(subvol_path)
82 else:
83 (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
84 return os.path.join(base_path, ".snap", snapshot, uuid_str)
85
86 # v1
87 base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
88 return os.path.join(base_path, ".snap", snapshot)
89
90 def _verify_clone_attrs(self, source_path, clone_path):
91 path1 = source_path
92 path2 = clone_path
93
94 p = self.mount_a.run_shell(["find", path1])
95 paths = p.stdout.getvalue().strip().split()
96
97 # for each entry in source and clone (sink) verify certain inode attributes:
98 # inode type, mode, ownership, [am]time.
99 for source_path in paths:
100 sink_entry = source_path[len(path1)+1:]
101 sink_path = os.path.join(path2, sink_entry)
102
103 # mode+type
104 sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16)
105 cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16)
106 self.assertEqual(sval, cval)
107
108 # ownership
109 sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip())
110 cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip())
111 self.assertEqual(sval, cval)
112
113 sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip())
114 cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip())
115 self.assertEqual(sval, cval)
116
117 # inode timestamps
118 # do not check access as kclient will generally not update this like ceph-fuse will.
119 sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip())
120 cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
121 self.assertEqual(sval, cval)
122
    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        """Verify clone-root attributes: quota, data_pool and pool_namespace.

        Remaining attributes of the clone root are validated in _verify_clone_attrs.
        """
        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        """Verify that *clone* matches the source *snapshot* of *subvolume*.

        Pass in subvol_path (subvolume path when snapshot was taken) when the
        subvolume is removed but snapshots are retained for clone verification.
        """
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        # poll until source and clone agree on the recursive entry count
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)
167
168 def _generate_random_volume_name(self, count=1):
169 n = self.volume_start
170 volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
171 self.volume_start += count
172 return volumes[0] if count == 1 else volumes
173
174 def _generate_random_subvolume_name(self, count=1):
175 n = self.subvolume_start
176 subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
177 self.subvolume_start += count
178 return subvolumes[0] if count == 1 else subvolumes
179
180 def _generate_random_group_name(self, count=1):
181 n = self.group_start
182 groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
183 self.group_start += count
184 return groups[0] if count == 1 else groups
185
186 def _generate_random_snapshot_name(self, count=1):
187 n = self.snapshot_start
188 snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
189 self.snapshot_start += count
190 return snaps[0] if count == 1 else snaps
191
192 def _generate_random_clone_name(self, count=1):
193 n = self.clone_start
194 clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
195 self.clone_start += count
196 return clones[0] if count == 1 else clones
197
    def _enable_multi_fs(self):
        """Allow multiple file systems in the cluster."""
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        """Create a fresh volume if none exists, otherwise reuse the first one.

        Sets self.volname; self.vol_created records whether tearDown must
        delete the volume this test created.
        """
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']
209
210 def _get_volume_info(self, vol_name, human_readable=False):
211 if human_readable:
212 args = ["volume", "info", vol_name, human_readable]
213 else:
214 args = ["volume", "info", vol_name]
215 args = tuple(args)
216 vol_md = self._fs_cmd(*args)
217 return vol_md
218
    def _get_subvolume_group_path(self, vol_name, group_name):
        """Return the mount-relative path of a subvolume group."""
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_group_info(self, vol_name, group_name):
        """Return raw 'subvolumegroup info' output (JSON text)."""
        args = ["subvolumegroup", "info", vol_name, group_name]
        args = tuple(args)
        group_md = self._fs_cmd(*args)
        return group_md

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        """Return the mount-relative path of a subvolume (optionally in a group)."""
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        """Return raw 'subvolume info' output (JSON text)."""
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        """Return raw 'subvolume snapshot info' output (JSON text)."""
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md
255
    def _delete_test_volume(self):
        """Remove the volume created by this test run."""
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        """Override the subvolume's data pool and/or pool namespace via layout xattrs."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        """Change mode and ownership of the subvolume root out-of-band (via shell)."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['sudo', 'chmod', mode, subvolpath], omit_sudo=False)

        # ownership
        self.mount_a.run_shell(['sudo', 'chown', uid, subvolpath], omit_sudo=False)
        self.mount_a.run_shell(['sudo', 'chgrp', gid, subvolpath], omit_sudo=False)
277
278 def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
279 number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
280 # get subvolume path for IO
281 args = ["subvolume", "getpath", self.volname, subvolume]
282 if subvolume_group:
283 args.append(subvolume_group)
284 args = tuple(args)
285 subvolpath = self._fs_cmd(*args)
286 self.assertNotEqual(subvolpath, None)
287 subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline
288
289 io_path = subvolpath
290 if create_dir:
291 io_path = os.path.join(subvolpath, create_dir)
292 self.mount_a.run_shell_payload(f"mkdir -p {io_path}")
293
294 log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
295 for i in range(number_of_files):
296 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
297 self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)
298
    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        """Create a mixed set of entries (directory plus symlinks) in the subvolume."""
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership would be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)
313
314 def _wait_for_trash_empty(self, timeout=60):
315 # XXX: construct the trash dir path (note that there is no mgr
316 # [sub]volume interface for this).
317 trashdir = os.path.join("./", "volumes", "_deleting")
318 self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
319
320 def _wait_for_subvol_trash_empty(self, subvol, group="_nogroup", timeout=30):
321 trashdir = os.path.join("./", "volumes", group, subvol, ".trash")
322 try:
323 self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
324 except CommandFailedError as ce:
325 if ce.exitstatus != errno.ENOENT:
326 pass
327 else:
328 raise
329
    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        """Assert the subvolume's .meta file is at the expected location and
        contains the expected 'version = N' line.

        Legacy subvolumes keep their meta at volumes/_legacy/<md5-of-path>.meta;
        newer ones at volumes/<group>/<subvol>/.meta.
        """
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            # legacy meta filename is the md5 hex digest of the absolute subvolume path
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['sudo', 'cat', metapath], omit_sudo=False)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))

    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        """Hand-craft an on-disk v1-format subvolume (bypassing the mgr) and
        return its create path; used to exercise upgrade/compat paths."""
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['sudo', 'mkdir', '-p', snappath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath
375
376 def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
377 group = subvol_group if subvol_group is not None else '_nogroup'
378 trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
379 if create:
380 self.mount_a.run_shell(['sudo', 'mkdir', '-p', trashpath], omit_sudo=False)
381 else:
382 self.mount_a.run_shell(['sudo', 'rmdir', trashpath], omit_sudo=False)
383
    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
        key = {key}

        """.format(authid=authid,key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())
403
404 def _auth_metadata_get(self, filedata):
405 """
406 Return a deserialized JSON object, or None
407 """
408 try:
409 data = json.loads(filedata)
410 except json.decoder.JSONDecodeError:
411 data = None
412 return data
413
    def setUp(self):
        """Create/reuse a test volume and seed per-entity name counters."""
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        # random starting points keep generated names unique across runs
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        # delete the volume only if this test run created it
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()
431
432
class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That the volume can be created and then cleans up
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        # the new volume must appear in 'fs volume ls'
        if not (volname in ([volume['name'] for volume in volumels])):
            raise RuntimeError("Error creating volume '{0}'".format(volname))
        else:
            # clean up
            self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
448
    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed and
        finally cleans up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        #create new volumes and add it to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                # order-insensitive comparison of expected vs listed names
                volnames = [volume['name'] for volume in volumels]
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")
476
477 def test_volume_rm(self):
478 """
479 That the volume can only be removed when --yes-i-really-mean-it is used
480 and verify that the deleted volume is not listed anymore.
481 """
482 for m in self.mounts:
483 m.umount_wait()
484 try:
485 self._fs_cmd("volume", "rm", self.volname)
486 except CommandFailedError as ce:
487 if ce.exitstatus != errno.EPERM:
488 raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
489 "but it failed with {0}".format(ce.exitstatus))
490 else:
491 self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
492
493 #check if it's gone
494 volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
495 if (self.volname in [volume['name'] for volume in volumes]):
496 raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
497 "The volume {0} not removed.".format(self.volname))
498 else:
499 raise RuntimeError("expected the 'fs volume rm' command to fail.")
500
    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That the arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        # capture the pool list before removal so we can verify deletion
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        #check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        # volume rm should fail while pool deletion is disallowed
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        # re-enable pool deletion; removal should now succeed
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        #check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        #check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))
552
    def test_volume_rename(self):
        """
        That volume, its file system and pools, can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        # renaming also renames the default-layout pools to the new scheme
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        # rename twice; the second rename must be a no-op, not an error
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_fails_without_confirmation_flag(self):
        """
        That renaming volume fails without --yes-i-really-mean-it flag.
        """
        newvolname = self._generate_random_volume_name()
        try:
            self._fs_cmd("volume", "rename", self.volname, newvolname)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "invalid error code on renaming a FS volume without the "
                             "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of FS volume to fail without the "
                      "'--yes-i-really-mean-it' flag")
609
    def test_volume_rename_for_more_than_one_data_pool(self):
        """
        That renaming a volume with more than one data pool does not change
        the name of the data pools.
        """
        for m in self.mounts:
            m.umount_wait()
        self.fs.add_data_pool('another-data-pool')
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        self.fs.get_pool_names(refresh=True)
        # snapshot the data pool names before the rename for later comparison
        orig_data_pool_names = list(self.fs.data_pools.values())
        new_metadata_pool = f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", self.volname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        # metadata pool name changed
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        # data pool names unchanged
        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))
635
    def test_volume_info(self):
        """
        Tests the 'fs volume info' command
        """
        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
        group = self._generate_random_group_name()
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertEqual(vol_info["used_size"], 0,
                         "Size should be zero when volumes directory is empty")

    def test_volume_info_without_subvolumegroup(self):
        """
        Tests the 'fs volume info' command without subvolume group
        """
        vol_fields = ["pools", "mon_addrs"]
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        # size fields are reported only once a subvolumegroup exists
        self.assertNotIn("used_size", vol_info,
                         "'used_size' should not be present in absence of subvolumegroup")
        self.assertNotIn("pending_subvolume_deletions", vol_info,
                         "'pending_subvolume_deletions' should not be present in absence"
                         " of subvolumegroup")
667
668 def test_volume_info_with_human_readable_flag(self):
669 """
670 Tests the 'fs volume info --human_readable' command
671 """
672 vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
673 group = self._generate_random_group_name()
674 # create subvolumegroup
675 self._fs_cmd("subvolumegroup", "create", self.volname, group)
676 # get volume metadata
677 vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
678 for md in vol_fields:
679 self.assertIn(md, vol_info,
680 f"'{md}' key not present in metadata of volume")
681 units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
682 assert vol_info["used_size"][-1] in units, "unit suffix in used_size is absent"
683 assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
684 assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
685 assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
686 assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
687 self.assertEqual(int(vol_info["used_size"]), 0,
688 "Size should be zero when volumes directory is empty")
689
690 def test_volume_info_with_human_readable_flag_without_subvolumegroup(self):
691 """
692 Tests the 'fs volume info --human_readable' command without subvolume group
693 """
694 vol_fields = ["pools", "mon_addrs"]
695 # get volume metadata
696 vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
697 for md in vol_fields:
698 self.assertIn(md, vol_info,
699 f"'{md}' key not present in metadata of volume")
700 units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
701 assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
702 assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
703 assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
704 assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
705 self.assertNotIn("used_size", vol_info,
706 "'used_size' should not be present in absence of subvolumegroup")
707 self.assertNotIn("pending_subvolume_deletions", vol_info,
708 "'pending_subvolume_deletions' should not be present in absence"
709 " of subvolumegroup")
710
711
class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        """A newly created subvolume group is owned by root (uid 0 / gid 0)."""
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_nonexistent_subvolume_group_create(self):
        """Creating a subvolume inside a missing group must fail with ENOENT."""
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try, creating subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            # only ENOENT is the expected failure; anything else propagates
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")

    def test_nonexistent_subvolume_group_rm(self):
        """Removing a missing subvolume group must fail with ENOENT."""
        group = "non_existent_group"

        # try, remove subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            # only ENOENT is the expected failure; anything else propagates
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
755
    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        """A group create that fails (invalid pool layout) must leave no path behind."""
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether group path is cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            # ENOENT here proves the failed create was rolled back
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")

    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        """A group created with --pool_layout lands on the requested data pool."""
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
799
800 def test_subvolume_group_create_with_desired_mode(self):
801 group1, group2 = self._generate_random_group_name(2)
802 # default mode
803 expected_mode1 = "755"
804 # desired mode
805 expected_mode2 = "777"
806
807 # create group
808 self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
809 self._fs_cmd("subvolumegroup", "create", self.volname, group1)
810
811 group1_path = self._get_subvolume_group_path(self.volname, group1)
812 group2_path = self._get_subvolume_group_path(self.volname, group2)
813 volumes_path = os.path.dirname(group1_path)
814
815 # check group's mode
816 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip()
817 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip()
818 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
819 self.assertEqual(actual_mode1, expected_mode1)
820 self.assertEqual(actual_mode2, expected_mode2)
821 self.assertEqual(actual_mode3, expected_mode1)
822
823 self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
824 self._fs_cmd("subvolumegroup", "rm", self.volname, group2)
825
826 def test_subvolume_group_create_with_desired_uid_gid(self):
827 """
828 That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
829 expected values.
830 """
831 uid = 1000
832 gid = 1000
833
834 # create subvolume group
835 subvolgroupname = self._generate_random_group_name()
836 self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))
837
838 # make sure it exists
839 subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
840 self.assertNotEqual(subvolgrouppath, None)
841
842 # verify the uid and gid
843 suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
844 sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
845 self.assertEqual(uid, suid)
846 self.assertEqual(gid, sgid)
847
848 # remove group
849 self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
850
851 def test_subvolume_group_create_with_invalid_data_pool_layout(self):
852 group = self._generate_random_group_name()
853 data_pool = "invalid_pool"
854 # create group with invalid data pool layout
855 try:
856 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
857 except CommandFailedError as ce:
858 if ce.exitstatus != errno.EINVAL:
859 raise
860 else:
861 raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
862
863 def test_subvolume_group_create_with_size(self):
864 # create group with size -- should set quota
865 group = self._generate_random_group_name()
866 self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")
867
868 # get group metadata
869 group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
870 self.assertEqual(group_info["bytes_quota"], 1000000000)
871
872 # remove group
873 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
874
875 def test_subvolume_group_info(self):
876 # tests the 'fs subvolumegroup info' command
877
878 group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
879 "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]
880
881 # create group
882 group = self._generate_random_group_name()
883 self._fs_cmd("subvolumegroup", "create", self.volname, group)
884
885 # get group metadata
886 group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
887 for md in group_md:
888 self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))
889
890 self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
891 self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
892 self.assertEqual(group_info["uid"], 0)
893 self.assertEqual(group_info["gid"], 0)
894
895 nsize = self.DEFAULT_FILE_SIZE*1024*1024
896 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
897
898 # get group metadata after quota set
899 group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
900 for md in group_md:
901 self.assertIn(md, group_info, "'{0}' key not present in metadata of subvolume".format(md))
902
903 self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
904 self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
905
906 # remove group
907 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
908
909 def test_subvolume_group_create_idempotence(self):
910 # create group
911 group = self._generate_random_group_name()
912 self._fs_cmd("subvolumegroup", "create", self.volname, group)
913
914 # try creating w/ same subvolume group name -- should be idempotent
915 self._fs_cmd("subvolumegroup", "create", self.volname, group)
916
917 # remove group
918 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
919
920 def test_subvolume_group_create_idempotence_mode(self):
921 # create group
922 group = self._generate_random_group_name()
923 self._fs_cmd("subvolumegroup", "create", self.volname, group)
924
925 # try creating w/ same subvolume group name with mode -- should set mode
926 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766")
927
928 group_path = self._get_subvolume_group_path(self.volname, group)
929
930 # check subvolumegroup's mode
931 mode = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
932 self.assertEqual(mode, "766")
933
934 # remove group
935 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
936
937 def test_subvolume_group_create_idempotence_uid_gid(self):
938 desired_uid = 1000
939 desired_gid = 1000
940
941 # create group
942 group = self._generate_random_group_name()
943 self._fs_cmd("subvolumegroup", "create", self.volname, group)
944
945 # try creating w/ same subvolume group name with uid/gid -- should set uid/gid
946 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid))
947
948 group_path = self._get_subvolume_group_path(self.volname, group)
949
950 # verify the uid and gid
951 actual_uid = int(self.mount_a.run_shell(['stat', '-c' '%u', group_path]).stdout.getvalue().strip())
952 actual_gid = int(self.mount_a.run_shell(['stat', '-c' '%g', group_path]).stdout.getvalue().strip())
953 self.assertEqual(desired_uid, actual_uid)
954 self.assertEqual(desired_gid, actual_gid)
955
956 # remove group
957 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
958
959 def test_subvolume_group_create_idempotence_data_pool(self):
960 # create group
961 group = self._generate_random_group_name()
962 self._fs_cmd("subvolumegroup", "create", self.volname, group)
963
964 group_path = self._get_subvolume_group_path(self.volname, group)
965
966 default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
967 new_pool = "new_pool"
968 self.assertNotEqual(default_pool, new_pool)
969
970 # add data pool
971 newid = self.fs.add_data_pool(new_pool)
972
973 # try creating w/ same subvolume group name with new data pool -- should set pool
974 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool)
975 desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
976 try:
977 self.assertEqual(desired_pool, new_pool)
978 except AssertionError:
979 self.assertEqual(int(desired_pool), newid) # old kernel returns id
980
981 # remove group
982 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
983
984 def test_subvolume_group_create_idempotence_resize(self):
985 # create group
986 group = self._generate_random_group_name()
987 self._fs_cmd("subvolumegroup", "create", self.volname, group)
988
989 # try creating w/ same subvolume name with size -- should set quota
990 self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")
991
992 # get group metadata
993 group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
994 self.assertEqual(group_info["bytes_quota"], 1000000000)
995
996 # remove group
997 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
998
    def test_subvolume_group_quota_mds_path_restriction_to_group_path(self):
        """
        Tests subvolumegroup quota enforcement with mds path restriction set to group.
        For quota to be enforced, read permission needs to be provided to the parent
        of the directory on which quota is set. Please see the tracker comment [1]
        [1] https://tracker.ceph.com/issues/55090#note-8
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # Create auth_id whose mds caps cover /volumes, i.e. the parent of the
        # group directory -- per the tracker note above, this is what allows the
        # client to see and enforce the group quota
        authid = "client.guest1"
        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", authid,
            "mds", "allow rw path=/volumes",
            "mgr", "allow rw",
            "osd", "allow rw tag cephfs *=*",
            "mon", "allow r",
            "--format=json-pretty"
        ))

        # Prepare guest_mount with new authid
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

        # mount the subvolume
        mount_path = os.path.join("/", subvolpath)
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # create 99 files of 1MB -- stays just below the 100MB group quota
        guest_mount.run_shell_payload("mkdir -p dir1")
        for i in range(99):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)
        try:
            # write two files of 1MB file to exceed the quota
            guest_mount.run_shell_payload("mkdir -p dir2")
            for i in range(2):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # For quota to be enforced
            time.sleep(60)
            # create 400 files of 1MB to exceed quota
            for i in range(400):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
                # Sometimes quota enforcement takes time.
                if i == 200:
                    time.sleep(60)
        except CommandFailedError:
            # the write failed: quota was enforced as expected
            pass
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # clean up
        guest_mount.umount_wait()

        # Delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1083
    def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self):
        """
        Tests subvolumegroup quota enforcement with mds path restriction set to subvolume path
        The quota should not be enforced because of the fourth limitation mentioned at
        https://docs.ceph.com/en/latest/cephfs/quota/#limitations
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        mount_path = os.path.join("/", subvolpath)

        # Create auth_id whose mds caps are restricted to the subvolume path only;
        # the client therefore cannot read the group directory where the quota is
        # set, which is the limitation this test exercises
        authid = "client.guest1"
        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", authid,
            "mds", f"allow rw path={mount_path}",
            "mgr", "allow rw",
            "osd", "allow rw tag cephfs *=*",
            "mon", "allow r",
            "--format=json-pretty"
        ))

        # Prepare guest_mount with new authid
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

        # mount the subvolume
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # create 99 files of 1MB -- stays just below the 100MB group quota
        guest_mount.run_shell_payload("mkdir -p dir1")
        for i in range(99):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)
        try:
            # write two files of 1MB file to exceed the quota
            guest_mount.run_shell_payload("mkdir -p dir2")
            for i in range(2):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # For quota to be enforced (if it were enforceable at all here)
            time.sleep(60)
            # create 400 files of 1MB; expected to succeed since the quota
            # cannot be enforced under this caps restriction
            for i in range(400):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
                # Sometimes quota enforcement takes time.
                if i == 200:
                    time.sleep(60)
        except CommandFailedError:
            self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed")

        # clean up
        guest_mount.umount_wait()

        # Delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1166
    def test_subvolume_group_quota_exceeded_subvolume_removal(self):
        """
        Tests subvolume removal if it's group quota is exceeded
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB -- stays just below the 100MB group quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400)
        except CommandFailedError:
            # writes failed as expected; subvolume removal must still work
            # even while the group quota is exceeded
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1211
    def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self):
        """
        Tests retained snapshot subvolume removal if it's group quota is exceeded
        """
        group = self._generate_random_group_name()
        subvolname = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB -- stays just below the 100MB group quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        # snapshot the subvolume twice so removal can retain them
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed quota
            # NOTE(review): unlike the non-retained sibling test this writes to the
            # subvolume root (no create_dir) -- presumably intentional; confirm
            self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400)
        except CommandFailedError:
            # remove with snapshot retention
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots")
            # remove snapshot1
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group)
            # remove snapshot2 (should remove volume)
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group)
            # verify subvolume trash is clean
            self._wait_for_subvol_trash_empty(subvolname, group=group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1268
1269 def test_subvolume_group_quota_subvolume_removal(self):
1270 """
1271 Tests subvolume removal if it's group quota is set.
1272 """
1273 # create group with size -- should set quota
1274 group = self._generate_random_group_name()
1275 self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")
1276
1277 # create subvolume under the group
1278 subvolname = self._generate_random_subvolume_name()
1279 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
1280
1281 # remove subvolume
1282 try:
1283 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1284 except CommandFailedError:
1285 self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")
1286
1287 # remove subvolumegroup
1288 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1289
1290 # verify trash dir is clean
1291 self._wait_for_trash_empty()
1292
    def test_subvolume_group_quota_legacy_subvolume_removal(self):
        """
        Tests legacy subvolume removal if it's group quota is set.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- in a custom group -- by creating
        # the directory tree directly on the mount instead of via the plugin
        createpath1 = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # strip the trailing newline

        # and... the subvolume path returned should be what we created behind the scene
        # (createpath1[1:] drops the leading "." so both sides read "/volumes/...")
        self.assertEqual(createpath1[1:], subvolpath1)

        # Set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume -- must not be blocked by the group quota
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1326
1327 def test_subvolume_group_quota_v1_subvolume_removal(self):
1328 """
1329 Tests v1 subvolume removal if it's group quota is set.
1330 """
1331 subvolume = self._generate_random_subvolume_name()
1332 group = self._generate_random_group_name()
1333
1334 # emulate a v1 subvolume -- in a custom group
1335 self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False)
1336
1337 # Set subvolumegroup quota on idempotent subvolumegroup creation
1338 self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")
1339
1340 # remove subvolume
1341 try:
1342 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
1343 except CommandFailedError:
1344 self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")
1345
1346 # remove subvolumegroup
1347 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1348
1349 # verify trash dir is clean
1350 self._wait_for_trash_empty()
1351
1352 def test_subvolume_group_resize_fail_invalid_size(self):
1353 """
1354 That a subvolume group cannot be resized to an invalid size and the quota did not change
1355 """
1356
1357 osize = self.DEFAULT_FILE_SIZE*1024*1024
1358 # create group with 1MB quota
1359 group = self._generate_random_group_name()
1360 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))
1361
1362 # make sure it exists
1363 grouppath = self._get_subvolume_group_path(self.volname, group)
1364 self.assertNotEqual(grouppath, None)
1365
1366 # try to resize the subvolume with an invalid size -10
1367 nsize = -10
1368 try:
1369 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1370 except CommandFailedError as ce:
1371 self.assertEqual(ce.exitstatus, errno.EINVAL,
1372 "invalid error code on resize of subvolume group with invalid size")
1373 else:
1374 self.fail("expected the 'fs subvolumegroup resize' command to fail")
1375
1376 # verify the quota did not change
1377 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1378 self.assertEqual(size, osize)
1379
1380 # remove group
1381 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1382
1383 def test_subvolume_group_resize_fail_zero_size(self):
1384 """
1385 That a subvolume group cannot be resized to a zero size and the quota did not change
1386 """
1387
1388 osize = self.DEFAULT_FILE_SIZE*1024*1024
1389 # create group with 1MB quota
1390 group = self._generate_random_group_name()
1391 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))
1392
1393 # make sure it exists
1394 grouppath = self._get_subvolume_group_path(self.volname, group)
1395 self.assertNotEqual(grouppath, None)
1396
1397 # try to resize the subvolume group with size 0
1398 nsize = 0
1399 try:
1400 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1401 except CommandFailedError as ce:
1402 self.assertEqual(ce.exitstatus, errno.EINVAL,
1403 "invalid error code on resize of subvolume group with invalid size")
1404 else:
1405 self.fail("expected the 'fs subvolumegroup resize' command to fail")
1406
1407 # verify the quota did not change
1408 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1409 self.assertEqual(size, osize)
1410
1411 # remove group
1412 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1413
    def test_subvolume_group_resize_quota_lt_used_size(self):
        """
        That a subvolume group can be resized to a size smaller than the current used size
        and the resulting quota matches the expected size.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create group with 20MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        # index is offset past DEFAULT_NUMBER_OF_FILES -- presumably to avoid
        # clashing with names written by the bulk io helper; confirm
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # recursive byte count of the subvolume reflects the 10MB written above
        usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))

        # shrink the subvolume group below the used size -- allowed by default
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        except CommandFailedError:
            self.fail("expected the 'fs subvolumegroup resize' command to succeed")

        # verify the quota
        size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
        self.assertEqual(size, nsize)

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1467
    def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self):
        """
        That a subvolume group cannot be resized to a size smaller than the current used size
        when --no_shrink is given and the quota did not change.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*20
        # create group with 20MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create one file of 10MB
        file_size=self.DEFAULT_FILE_SIZE*10
        number_of_files=1
        log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                             number_of_files,
                                                                             file_size))
        # index is offset past DEFAULT_NUMBER_OF_FILES -- presumably to avoid
        # clashing with names written by the bulk io helper; confirm
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
        self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

        # NOTE(review): the shrink-allowed sibling test reads rbytes from the
        # subvolume path; this one reads it from the group path -- confirm the
        # asymmetry is intentional
        usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes"))

        # attempt to shrink the group below its used size with --no_shrink
        nsize = usedsize // 2
        try:
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used")
        else:
            self.fail("expected the 'fs subvolumegroup resize' command to fail")

        # verify the quota did not change
        size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
        self.assertEqual(size, osize)

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1523
    def test_subvolume_group_resize_expand_on_full_subvolume(self):
        """
        That the subvolume group can be expanded after it is full and future write succeed
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB -- stays just below the 100MB group quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced
            time.sleep(20)
            # create 500 files of 1MB -- expected to hit the quota and fail
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            # Not able to write. So expand the subvolumegroup more and try writing the files again
            nsize = osize*7
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
            try:
                self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                          "to succeed".format(subvolname))
        else:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to fail".format(subvolname))

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1577
1578 def test_subvolume_group_resize_infinite_size(self):
1579 """
1580 That a subvolume group can be resized to an infinite size by unsetting its quota.
1581 """
1582
1583 osize = self.DEFAULT_FILE_SIZE*1024*1024
1584 # create group
1585 group = self._generate_random_group_name()
1586 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1587 "--size", str(osize))
1588
1589 # make sure it exists
1590 grouppath = self._get_subvolume_group_path(self.volname, group)
1591 self.assertNotEqual(grouppath, None)
1592
1593 # resize inf
1594 self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
1595
1596 # verify that the quota is None
1597 size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
1598 self.assertEqual(size, None)
1599
1600 # remove subvolume group
1601 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1602
    def test_subvolume_group_resize_infinite_size_future_writes(self):
        """
        That a subvolume group can be resized to an infinite size and the future writes succeed.
        """

        osize = self.DEFAULT_FILE_SIZE*1024*1024*5
        # create group with 5MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 4 files of 1MB, leaving the group just under its 5MB quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4)

        try:
            # write two files of 1MB file to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # For quota to be enforced (CephFS quota enforcement is asynchronous,
            # so give the clients time to learn about the exceeded quota)
            time.sleep(20)
            # create 500 files of 1MB -- expected to hit EDQUOT and raise
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            # Not able to write. So resize subvolumegroup to 'inf' and try writing the files again
            # resize inf
            self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
            try:
                # with the quota removed the same write load must now succeed
                self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
            except CommandFailedError:
                self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                          "to succeed".format(subvolname))
        else:
            # the first 500-file write never failed, i.e. the quota was not enforced
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to fail".format(subvolname))


        # verify that the quota is None (resizing to "inf" removes the xattr)
        size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
        self.assertEqual(size, None)

        # remove subvolume and group
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
1661
1662 def test_subvolume_group_ls(self):
1663 # tests the 'fs subvolumegroup ls' command
1664
1665 subvolumegroups = []
1666
1667 #create subvolumegroups
1668 subvolumegroups = self._generate_random_group_name(3)
1669 for groupname in subvolumegroups:
1670 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1671
1672 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1673 if len(subvolumegroupls) == 0:
1674 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
1675 else:
1676 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1677 if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
1678 raise RuntimeError("Error creating or listing subvolume groups")
1679
1680 def test_subvolume_group_ls_filter(self):
1681 # tests the 'fs subvolumegroup ls' command filters '_deleting' directory
1682
1683 subvolumegroups = []
1684
1685 #create subvolumegroup
1686 subvolumegroups = self._generate_random_group_name(3)
1687 for groupname in subvolumegroups:
1688 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1689
1690 # create subvolume and remove. This creates '_deleting' directory.
1691 subvolume = self._generate_random_subvolume_name()
1692 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1693 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1694
1695 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1696 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1697 if "_deleting" in subvolgroupnames:
1698 self.fail("Listing subvolume groups listed '_deleting' directory")
1699
1700 def test_subvolume_group_ls_filter_internal_directories(self):
1701 # tests the 'fs subvolumegroup ls' command filters internal directories
1702 # eg: '_deleting', '_nogroup', '_index', "_legacy"
1703
1704 subvolumegroups = self._generate_random_group_name(3)
1705 subvolume = self._generate_random_subvolume_name()
1706 snapshot = self._generate_random_snapshot_name()
1707 clone = self._generate_random_clone_name()
1708
1709 #create subvolumegroups
1710 for groupname in subvolumegroups:
1711 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1712
1713 # create subvolume which will create '_nogroup' directory
1714 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1715
1716 # create snapshot
1717 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1718
1719 # clone snapshot which will create '_index' directory
1720 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
1721
1722 # wait for clone to complete
1723 self._wait_for_clone_to_complete(clone)
1724
1725 # remove snapshot
1726 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1727
1728 # remove subvolume which will create '_deleting' directory
1729 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1730
1731 # list subvolumegroups
1732 ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1733 self.assertEqual(len(ret), len(subvolumegroups))
1734
1735 ret_list = [subvolumegroup['name'] for subvolumegroup in ret]
1736 self.assertEqual(len(ret_list), len(subvolumegroups))
1737
1738 self.assertEqual(all(elem in subvolumegroups for elem in ret_list), True)
1739
1740 # cleanup
1741 self._fs_cmd("subvolume", "rm", self.volname, clone)
1742 for groupname in subvolumegroups:
1743 self._fs_cmd("subvolumegroup", "rm", self.volname, groupname)
1744
1745 def test_subvolume_group_ls_for_nonexistent_volume(self):
1746 # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
1747 # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created
1748
1749 # list subvolume groups
1750 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1751 if len(subvolumegroupls) > 0:
1752 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
1753
1754 def test_subvolumegroup_pin_distributed(self):
1755 self.fs.set_max_mds(2)
1756 status = self.fs.wait_for_daemons()
1757 self.config_set('mds', 'mds_export_ephemeral_distributed', True)
1758
1759 group = "pinme"
1760 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1761 self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
1762 subvolumes = self._generate_random_subvolume_name(50)
1763 for subvolume in subvolumes:
1764 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1765 self._wait_distributed_subtrees(2 * 2, status=status, rank="all")
1766
1767 # remove subvolumes
1768 for subvolume in subvolumes:
1769 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1770
1771 # verify trash dir is clean
1772 self._wait_for_trash_empty()
1773
1774 def test_subvolume_group_rm_force(self):
1775 # test removing non-existing subvolume group with --force
1776 group = self._generate_random_group_name()
1777 try:
1778 self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
1779 except CommandFailedError:
1780 raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
1781
1782 def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self):
1783 """Test the presence of any subvolumegroup when only subvolumegroup is present"""
1784
1785 group = self._generate_random_group_name()
1786 # create subvolumegroup
1787 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1788 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1789 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1790 # delete subvolumegroup
1791 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1792 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1793 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1794
1795 def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self):
1796 """Test the presence of any subvolumegroup when no subvolumegroup is present"""
1797
1798 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1799 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1800
1801 def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
1802 """Test the presence of any subvolume when subvolumegroup
1803 and subvolume both are present"""
1804
1805 group = self._generate_random_group_name()
1806 subvolume = self._generate_random_subvolume_name(2)
1807 # create subvolumegroup
1808 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1809 # create subvolume in group
1810 self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
1811 # create subvolume
1812 self._fs_cmd("subvolume", "create", self.volname, subvolume[1])
1813 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1814 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1815 # delete subvolume in group
1816 self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
1817 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1818 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1819 # delete subvolume
1820 self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
1821 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1822 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1823 # delete subvolumegroup
1824 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1825 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1826 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1827
1828 def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self):
1829 """Test the presence of any subvolume when subvolume is present
1830 but no subvolumegroup is present"""
1831
1832 subvolume = self._generate_random_subvolume_name()
1833 # create subvolume
1834 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1835 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1836 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1837 # delete subvolume
1838 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1839 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1840 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1841
1842
1843 class TestSubvolumes(TestVolumesHelper):
1844 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
1845 def test_async_subvolume_rm(self):
1846 subvolumes = self._generate_random_subvolume_name(100)
1847
1848 # create subvolumes
1849 for subvolume in subvolumes:
1850 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
1851 self._do_subvolume_io(subvolume, number_of_files=10)
1852
1853 self.mount_a.umount_wait()
1854
1855 # remove subvolumes
1856 for subvolume in subvolumes:
1857 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1858
1859 self.mount_a.mount_wait()
1860
1861 # verify trash dir is clean
1862 self._wait_for_trash_empty(timeout=300)
1863
1864 def test_default_uid_gid_subvolume(self):
1865 subvolume = self._generate_random_subvolume_name()
1866 expected_uid = 0
1867 expected_gid = 0
1868
1869 # create subvolume
1870 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1871 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1872
1873 # check subvolume's uid and gid
1874 stat = self.mount_a.stat(subvol_path)
1875 self.assertEqual(stat['st_uid'], expected_uid)
1876 self.assertEqual(stat['st_gid'], expected_gid)
1877
1878 # remove subvolume
1879 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1880
1881 # verify trash dir is clean
1882 self._wait_for_trash_empty()
1883
1884 def test_nonexistent_subvolume_rm(self):
1885 # remove non-existing subvolume
1886 subvolume = "non_existent_subvolume"
1887
1888 # try, remove subvolume
1889 try:
1890 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1891 except CommandFailedError as ce:
1892 if ce.exitstatus != errno.ENOENT:
1893 raise
1894 else:
1895 raise RuntimeError("expected the 'fs subvolume rm' command to fail")
1896
1897 def test_subvolume_create_and_rm(self):
1898 # create subvolume
1899 subvolume = self._generate_random_subvolume_name()
1900 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1901
1902 # make sure it exists
1903 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1904 self.assertNotEqual(subvolpath, None)
1905
1906 # remove subvolume
1907 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1908 # make sure its gone
1909 try:
1910 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1911 except CommandFailedError as ce:
1912 if ce.exitstatus != errno.ENOENT:
1913 raise
1914 else:
1915 raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")
1916
1917 # verify trash dir is clean
1918 self._wait_for_trash_empty()
1919
1920 def test_subvolume_create_and_rm_in_group(self):
1921 subvolume = self._generate_random_subvolume_name()
1922 group = self._generate_random_group_name()
1923
1924 # create group
1925 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1926
1927 # create subvolume in group
1928 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1929
1930 # remove subvolume
1931 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1932
1933 # verify trash dir is clean
1934 self._wait_for_trash_empty()
1935
1936 # remove group
1937 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1938
1939 def test_subvolume_create_idempotence(self):
1940 # create subvolume
1941 subvolume = self._generate_random_subvolume_name()
1942 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1943
1944 # try creating w/ same subvolume name -- should be idempotent
1945 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1946
1947 # remove subvolume
1948 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1949
1950 # verify trash dir is clean
1951 self._wait_for_trash_empty()
1952
1953 def test_subvolume_create_idempotence_resize(self):
1954 # create subvolume
1955 subvolume = self._generate_random_subvolume_name()
1956 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1957
1958 # try creating w/ same subvolume name with size -- should set quota
1959 self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")
1960
1961 # get subvolume metadata
1962 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1963 self.assertEqual(subvol_info["bytes_quota"], 1000000000)
1964
1965 # remove subvolume
1966 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1967
1968 # verify trash dir is clean
1969 self._wait_for_trash_empty()
1970
1971 def test_subvolume_create_idempotence_mode(self):
1972 # default mode
1973 default_mode = "755"
1974
1975 # create subvolume
1976 subvolume = self._generate_random_subvolume_name()
1977 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1978
1979 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1980
1981 actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
1982 self.assertEqual(actual_mode_1, default_mode)
1983
1984 # try creating w/ same subvolume name with --mode 777
1985 new_mode = "777"
1986 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)
1987
1988 actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
1989 self.assertEqual(actual_mode_2, new_mode)
1990
1991 # remove subvolume
1992 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1993
1994 # verify trash dir is clean
1995 self._wait_for_trash_empty()
1996
1997 def test_subvolume_create_idempotence_without_passing_mode(self):
1998 # create subvolume
1999 desired_mode = "777"
2000 subvolume = self._generate_random_subvolume_name()
2001 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)
2002
2003 subvol_path = self._get_subvolume_path(self.volname, subvolume)
2004
2005 actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
2006 self.assertEqual(actual_mode_1, desired_mode)
2007
2008 # default mode
2009 default_mode = "755"
2010
2011 # try creating w/ same subvolume name without passing --mode argument
2012 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2013
2014 actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
2015 self.assertEqual(actual_mode_2, default_mode)
2016
2017 # remove subvolume
2018 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2019
2020 # verify trash dir is clean
2021 self._wait_for_trash_empty()
2022
2023 def test_subvolume_create_isolated_namespace(self):
2024 """
2025 Create subvolume in separate rados namespace
2026 """
2027
2028 # create subvolume
2029 subvolume = self._generate_random_subvolume_name()
2030 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")
2031
2032 # get subvolume metadata
2033 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2034 self.assertNotEqual(len(subvol_info), 0)
2035 self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)
2036
2037 # remove subvolumes
2038 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2039
2040 # verify trash dir is clean
2041 self._wait_for_trash_empty()
2042
2043 def test_subvolume_create_with_auto_cleanup_on_fail(self):
2044 subvolume = self._generate_random_subvolume_name()
2045 data_pool = "invalid_pool"
2046 # create subvolume with invalid data pool layout fails
2047 with self.assertRaises(CommandFailedError):
2048 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
2049
2050 # check whether subvol path is cleaned up
2051 try:
2052 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2053 except CommandFailedError as ce:
2054 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
2055 else:
2056 self.fail("expected the 'fs subvolume getpath' command to fail")
2057
2058 # verify trash dir is clean
2059 self._wait_for_trash_empty()
2060
    def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
        subvol1, subvol2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group. this also helps set default pool layout for subvolumes
        # created within the group.
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group.
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

        # the pool the new pool name is compared against must differ from it
        default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume specifying the new data pool as its pool layout
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                     "--pool_layout", new_pool)
        subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

        # the layout xattr may come back as the pool name or, on older
        # kernels, as the numeric pool id -- accept either
        desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
2097
2098 def test_subvolume_create_with_desired_mode(self):
2099 subvol1 = self._generate_random_subvolume_name()
2100
2101 # default mode
2102 default_mode = "755"
2103 # desired mode
2104 desired_mode = "777"
2105
2106 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")
2107
2108 subvol1_path = self._get_subvolume_path(self.volname, subvol1)
2109
2110 # check subvolumegroup's mode
2111 subvol_par_path = os.path.dirname(subvol1_path)
2112 group_path = os.path.dirname(subvol_par_path)
2113 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
2114 self.assertEqual(actual_mode1, default_mode)
2115 # check /volumes mode
2116 volumes_path = os.path.dirname(group_path)
2117 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
2118 self.assertEqual(actual_mode2, default_mode)
2119 # check subvolume's mode
2120 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
2121 self.assertEqual(actual_mode3, desired_mode)
2122
2123 self._fs_cmd("subvolume", "rm", self.volname, subvol1)
2124
2125 # verify trash dir is clean
2126 self._wait_for_trash_empty()
2127
2128 def test_subvolume_create_with_desired_mode_in_group(self):
2129 subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
2130
2131 group = self._generate_random_group_name()
2132 # default mode
2133 expected_mode1 = "755"
2134 # desired mode
2135 expected_mode2 = "777"
2136
2137 # create group
2138 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2139
2140 # create subvolume in group
2141 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
2142 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
2143 # check whether mode 0777 also works
2144 self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")
2145
2146 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
2147 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
2148 subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)
2149
2150 # check subvolume's mode
2151 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
2152 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
2153 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
2154 self.assertEqual(actual_mode1, expected_mode1)
2155 self.assertEqual(actual_mode2, expected_mode2)
2156 self.assertEqual(actual_mode3, expected_mode2)
2157
2158 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
2159 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
2160 self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
2161 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2162
2163 # verify trash dir is clean
2164 self._wait_for_trash_empty()
2165
2166 def test_subvolume_create_with_desired_uid_gid(self):
2167 """
2168 That the subvolume can be created with the desired uid and gid and its uid and gid matches the
2169 expected values.
2170 """
2171 uid = 1000
2172 gid = 1000
2173
2174 # create subvolume
2175 subvolname = self._generate_random_subvolume_name()
2176 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))
2177
2178 # make sure it exists
2179 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2180 self.assertNotEqual(subvolpath, None)
2181
2182 # verify the uid and gid
2183 suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
2184 sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
2185 self.assertEqual(uid, suid)
2186 self.assertEqual(gid, sgid)
2187
2188 # remove subvolume
2189 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2190
2191 # verify trash dir is clean
2192 self._wait_for_trash_empty()
2193
2194 def test_subvolume_create_with_invalid_data_pool_layout(self):
2195 subvolume = self._generate_random_subvolume_name()
2196 data_pool = "invalid_pool"
2197 # create subvolume with invalid data pool layout
2198 try:
2199 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
2200 except CommandFailedError as ce:
2201 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
2202 else:
2203 self.fail("expected the 'fs subvolume create' command to fail")
2204
2205 # verify trash dir is clean
2206 self._wait_for_trash_empty()
2207
2208 def test_subvolume_create_with_invalid_size(self):
2209 # create subvolume with an invalid size -1
2210 subvolume = self._generate_random_subvolume_name()
2211 try:
2212 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
2213 except CommandFailedError as ce:
2214 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
2215 else:
2216 self.fail("expected the 'fs subvolume create' command to fail")
2217
2218 # verify trash dir is clean
2219 self._wait_for_trash_empty()
2220
2221 def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
2222 """
2223 That a 'subvolume create' and 'subvolume ls' should throw
2224 permission denied error if option --group=_nogroup is provided.
2225 """
2226
2227 subvolname = self._generate_random_subvolume_name()
2228
2229 # try to create subvolume providing --group_name=_nogroup option
2230 try:
2231 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
2232 except CommandFailedError as ce:
2233 self.assertEqual(ce.exitstatus, errno.EPERM)
2234 else:
2235 self.fail("expected the 'fs subvolume create' command to fail")
2236
2237 # create subvolume
2238 self._fs_cmd("subvolume", "create", self.volname, subvolname)
2239
2240 # try to list subvolumes providing --group_name=_nogroup option
2241 try:
2242 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
2243 except CommandFailedError as ce:
2244 self.assertEqual(ce.exitstatus, errno.EPERM)
2245 else:
2246 self.fail("expected the 'fs subvolume ls' command to fail")
2247
2248 # list subvolumes
2249 self._fs_cmd("subvolume", "ls", self.volname)
2250
2251 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2252
2253 # verify trash dir is clean.
2254 self._wait_for_trash_empty()
2255
2256 def test_subvolume_expand(self):
2257 """
2258 That a subvolume can be expanded in size and its quota matches the expected size.
2259 """
2260
2261 # create subvolume
2262 subvolname = self._generate_random_subvolume_name()
2263 osize = self.DEFAULT_FILE_SIZE*1024*1024
2264 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
2265
2266 # make sure it exists
2267 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2268 self.assertNotEqual(subvolpath, None)
2269
2270 # expand the subvolume
2271 nsize = osize*2
2272 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2273
2274 # verify the quota
2275 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2276 self.assertEqual(size, nsize)
2277
2278 # remove subvolume
2279 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2280
2281 # verify trash dir is clean
2282 self._wait_for_trash_empty()
2283
2284 def test_subvolume_info(self):
2285 # tests the 'fs subvolume info' command
2286
2287 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
2288 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
2289 "type", "uid", "features", "state"]
2290
2291 # create subvolume
2292 subvolume = self._generate_random_subvolume_name()
2293 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2294
2295 # get subvolume metadata
2296 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2297 for md in subvol_md:
2298 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
2299
2300 self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
2301 self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
2302 self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
2303 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
2304
2305 self.assertEqual(len(subvol_info["features"]), 3,
2306 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
2307 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
2308 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
2309
2310 nsize = self.DEFAULT_FILE_SIZE*1024*1024
2311 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
2312
2313 # get subvolume metadata after quota set
2314 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2315 for md in subvol_md:
2316 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
2317
2318 self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
2319 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
2320 self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
2321 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
2322
2323 self.assertEqual(len(subvol_info["features"]), 3,
2324 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
2325 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
2326 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
2327
2328 # remove subvolumes
2329 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2330
2331 # verify trash dir is clean
2332 self._wait_for_trash_empty()
2333
2334 def test_subvolume_ls(self):
2335 # tests the 'fs subvolume ls' command
2336
2337 subvolumes = []
2338
2339 # create subvolumes
2340 subvolumes = self._generate_random_subvolume_name(3)
2341 for subvolume in subvolumes:
2342 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2343
2344 # list subvolumes
2345 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2346 if len(subvolumels) == 0:
2347 self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
2348 else:
2349 subvolnames = [subvolume['name'] for subvolume in subvolumels]
2350 if collections.Counter(subvolnames) != collections.Counter(subvolumes):
2351 self.fail("Error creating or listing subvolumes")
2352
2353 # remove subvolume
2354 for subvolume in subvolumes:
2355 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2356
2357 # verify trash dir is clean
2358 self._wait_for_trash_empty()
2359
2360 def test_subvolume_ls_with_groupname_as_internal_directory(self):
2361 # tests the 'fs subvolume ls' command when the default groupname as internal directories
2362 # Eg: '_nogroup', '_legacy', '_deleting', '_index'.
2363 # Expecting 'fs subvolume ls' will be fail with errno EINVAL for '_legacy', '_deleting', '_index'
2364 # Expecting 'fs subvolume ls' will be fail with errno EPERM for '_nogroup'
2365
2366 # try to list subvolumes providing --group_name=_nogroup option
2367 try:
2368 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
2369 except CommandFailedError as ce:
2370 self.assertEqual(ce.exitstatus, errno.EPERM)
2371 else:
2372 self.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup")
2373
2374 # try to list subvolumes providing --group_name=_legacy option
2375 try:
2376 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_legacy")
2377 except CommandFailedError as ce:
2378 self.assertEqual(ce.exitstatus, errno.EINVAL)
2379 else:
2380 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy")
2381
2382 # try to list subvolumes providing --group_name=_deleting option
2383 try:
2384 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_deleting")
2385 except CommandFailedError as ce:
2386 self.assertEqual(ce.exitstatus, errno.EINVAL)
2387 else:
2388 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting")
2389
2390 # try to list subvolumes providing --group_name=_index option
2391 try:
2392 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_index")
2393 except CommandFailedError as ce:
2394 self.assertEqual(ce.exitstatus, errno.EINVAL)
2395 else:
2396 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _index")
2397
2398 def test_subvolume_ls_for_notexistent_default_group(self):
2399 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
2400 # prerequisite: we expect that the volume is created and the default group _nogroup is
2401 # NOT created (i.e. a subvolume without group is not created)
2402
2403 # list subvolumes
2404 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2405 if len(subvolumels) > 0:
2406 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
2407
2408 def test_subvolume_marked(self):
2409 """
2410 ensure a subvolume is marked with the ceph.dir.subvolume xattr
2411 """
2412 subvolume = self._generate_random_subvolume_name()
2413
2414 # create subvolume
2415 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2416
2417 # getpath
2418 subvolpath = self._get_subvolume_path(self.volname, subvolume)
2419
2420 # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
2421 # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
2422 # outside the subvolume
2423 dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
2424 srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
2425 rename_script = dedent("""
2426 import os
2427 import errno
2428 try:
2429 os.rename("{src}", "{dst}")
2430 except OSError as e:
2431 if e.errno != errno.EXDEV:
2432 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
2433 else:
2434 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
2435 """)
2436 self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)
2437
2438 # remove subvolume
2439 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2440
2441 # verify trash dir is clean
2442 self._wait_for_trash_empty()
2443
2444 def test_subvolume_pin_export(self):
2445 self.fs.set_max_mds(2)
2446 status = self.fs.wait_for_daemons()
2447
2448 subvolume = self._generate_random_subvolume_name()
2449 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2450 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
2451 path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2452 path = os.path.dirname(path) # get subvolume path
2453
2454 self._get_subtrees(status=status, rank=1)
2455 self._wait_subtrees([(path, 1)], status=status)
2456
2457 # remove subvolume
2458 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2459
2460 # verify trash dir is clean
2461 self._wait_for_trash_empty()
2462
2463 ### authorize operations
2464
    def test_authorize_deauthorize_legacy_subvolume(self):
        """Authorize/deauthorize a guest auth ID on a legacy (pre-mgr/volumes) subvolume.

        A legacy subvolume is emulated by creating the directory tree and
        layout xattr by hand instead of via 'fs subvolume create'.  The test
        then verifies rw access, downgrade to read-only, and cleanup of the
        auth ID on deauthorize.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        # mount_b acts as the guest client; unmount so it can be remounted
        # with the guest credentials later
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # emulate a old-fashioned subvolume in a custom group
        createpath = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        mount_path = os.path.join("/", "volumes", group, subvolume)

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2522
    def test_authorize_deauthorize_subvolume(self):
        """Authorize/deauthorize a guest auth ID on a mgr/volumes-managed subvolume.

        Same flow as test_authorize_deauthorize_legacy_subvolume, but the
        subvolume is created through 'fs subvolume create' rather than
        emulated by hand: grant rw, verify write access, downgrade to
        read-only, verify the downgrade, then deauthorize and clean up.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        authid = "alice"

        # mount_b acts as the guest client; unmount so it can be remounted
        # with the guest credentials later
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
                                  "--group_name", group).rstrip()

        # authorize guest authID read-write access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id")

        # guest authID should exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(authid), existing_ids)

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, authid, key)

        # mount the subvolume, and write to it
        guest_mount.mount_wait(cephfs_mntpt=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # authorize guest authID read access to subvolume
        key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
                           "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")

        # guest client sees the change in access level to read only after a
        # remount of the subvolume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # read existing content of the subvolume
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # cannot write into read-only subvolume
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

        # cleanup
        guest_mount.umount_wait()
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
                     "--group_name", group)
        # guest authID should no longer exist
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertNotIn("client.{0}".format(authid), existing_ids)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2578
    def test_multitenant_subvolumes(self):
        """
        That subvolume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        subvolumes is stored as a two-way mapping between auth
        IDs and subvolumes that they're authorized to access.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        guest_mount = self.mount_b

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "alice"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Check that subvolume metadata file is created on subvolume creation.
        subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
        self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'alice', is
        # created on authorizing 'alice' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different subvolumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        # The on-disk version may be newer than the expected one, so check
        # it with >= and then drop it so the rest compares exactly.
        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the subvolume metadata file stores info about auth IDs
        # and their access levels to the subvolume, versioning details, etc.
        expected_subvol_metadata = {
            "version": 1,
            "compat_version": 1,
            "auths": {
                "alice": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }
        subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))

        # Same >=-then-drop treatment for the subvolume metadata version.
        self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
        del expected_subvol_metadata["version"]
        del subvol_metadata["version"]
        self.assertEqual(expected_subvol_metadata, subvol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'alice', which has already been used by a
        # 'guestclient_1' belonging to an another tenant for accessing
        # the volume.

        try:
            self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
                         "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
        else:
            self.fail("expected the 'fs subvolume authorize' command to fail")

        # Check that auth metadata file is cleaned up on removing
        # auth ID's only access to a volume.

        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                     "--group_name", group)
        self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Check that subvolume metadata file is cleaned up on subvolume deletion.
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))

        # clean up
        guest_mount.umount_wait()
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2693
2694 def test_subvolume_authorized_list(self):
2695 subvolume = self._generate_random_subvolume_name()
2696 group = self._generate_random_group_name()
2697 authid1 = "alice"
2698 authid2 = "guest1"
2699 authid3 = "guest2"
2700
2701 # create group
2702 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2703
2704 # create subvolume in group
2705 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2706
2707 # authorize alice authID read-write access to subvolume
2708 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
2709 "--group_name", group)
2710 # authorize guest1 authID read-write access to subvolume
2711 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
2712 "--group_name", group)
2713 # authorize guest2 authID read access to subvolume
2714 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
2715 "--group_name", group, "--access_level", "r")
2716
2717 # list authorized-ids of the subvolume
2718 expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
2719 auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
2720 self.assertCountEqual(expected_auth_list, auth_list)
2721
2722 # cleanup
2723 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
2724 "--group_name", group)
2725 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
2726 "--group_name", group)
2727 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
2728 "--group_name", group)
2729 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2730 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2731
2732 def test_authorize_auth_id_not_created_by_mgr_volumes(self):
2733 """
2734 If the auth_id already exists and is not created by mgr plugin,
2735 it's not allowed to authorize the auth-id by default.
2736 """
2737
2738 subvolume = self._generate_random_subvolume_name()
2739 group = self._generate_random_group_name()
2740
2741 # Create auth_id
2742 self.fs.mon_manager.raw_cluster_cmd(
2743 "auth", "get-or-create", "client.guest1",
2744 "mds", "allow *",
2745 "osd", "allow rw",
2746 "mon", "allow *"
2747 )
2748
2749 auth_id = "guest1"
2750 guestclient_1 = {
2751 "auth_id": auth_id,
2752 "tenant_id": "tenant1",
2753 }
2754
2755 # create group
2756 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2757
2758 # create subvolume in group
2759 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2760
2761 try:
2762 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2763 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2764 except CommandFailedError as ce:
2765 self.assertEqual(ce.exitstatus, errno.EPERM,
2766 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
2767 else:
2768 self.fail("expected the 'fs subvolume authorize' command to fail")
2769
2770 # clean up
2771 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2772 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2773 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2774
2775 def test_authorize_allow_existing_id_option(self):
2776 """
2777 If the auth_id already exists and is not created by mgr volumes,
2778 it's not allowed to authorize the auth-id by default but is
2779 allowed with option allow_existing_id.
2780 """
2781
2782 subvolume = self._generate_random_subvolume_name()
2783 group = self._generate_random_group_name()
2784
2785 # Create auth_id
2786 self.fs.mon_manager.raw_cluster_cmd(
2787 "auth", "get-or-create", "client.guest1",
2788 "mds", "allow *",
2789 "osd", "allow rw",
2790 "mon", "allow *"
2791 )
2792
2793 auth_id = "guest1"
2794 guestclient_1 = {
2795 "auth_id": auth_id,
2796 "tenant_id": "tenant1",
2797 }
2798
2799 # create group
2800 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2801
2802 # create subvolume in group
2803 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2804
2805 # Cannot authorize 'guestclient_1' to access the volume by default,
2806 # which already exists and not created by mgr volumes but is allowed
2807 # with option 'allow_existing_id'.
2808 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2809 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")
2810
2811 # clean up
2812 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
2813 "--group_name", group)
2814 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2815 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2816 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2817
2818 def test_deauthorize_auth_id_after_out_of_band_update(self):
2819 """
2820 If the auth_id authorized by mgr/volumes plugin is updated
2821 out of band, the auth_id should not be deleted after a
2822 deauthorize. It should only remove caps associated with it.
2823 """
2824
2825 subvolume = self._generate_random_subvolume_name()
2826 group = self._generate_random_group_name()
2827
2828 auth_id = "guest1"
2829 guestclient_1 = {
2830 "auth_id": auth_id,
2831 "tenant_id": "tenant1",
2832 }
2833
2834 # create group
2835 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2836
2837 # create subvolume in group
2838 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2839
2840 # Authorize 'guestclient_1' to access the subvolume.
2841 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2842 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2843
2844 subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
2845 "--group_name", group).rstrip()
2846
2847 # Update caps for guestclient_1 out of band
2848 out = self.fs.mon_manager.raw_cluster_cmd(
2849 "auth", "caps", "client.guest1",
2850 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
2851 "osd", "allow rw pool=cephfs_data",
2852 "mon", "allow r",
2853 "mgr", "allow *"
2854 )
2855
2856 # Deauthorize guestclient_1
2857 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
2858
2859 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
2860 # guestclient_1. The mgr and mds caps should be present which was updated out of band.
2861 out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
2862
2863 self.assertEqual("client.guest1", out[0]["entity"])
2864 self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
2865 self.assertEqual("allow *", out[0]["caps"]["mgr"])
2866 self.assertNotIn("osd", out[0]["caps"])
2867
2868 # clean up
2869 out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2870 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2871 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2872
    def test_recover_auth_metadata_during_authorize(self):
        """
        That auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status info. This
        test validates the recovery during authorize.
        """

        guest_mount = self.mount_b

        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        # snapshot the clean metadata for comparison after recovery
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run authorize again.  (sed flips every 'false' to 'true',
        # which presumably sets the dirty markers — the re-authorize below must
        # restore the file to its clean state.)
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Authorize 'guestclient_1' to access the subvolume.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # recovery should have restored the original metadata content
        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2924
    def test_recover_auth_metadata_during_deauthorize(self):
        """
        That auth metadata manager can recover from partial auth updates using
        metadata files, which store auth info and its update status info. This
        test validates the recovery during deauthorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        guestclient_1 = {
            "auth_id": "guest1",
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
        # snapshot the metadata while only subvolume1 is authorized; this is
        # the state deauthorize of subvolume2 should recover back to
        expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))

        # Authorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run de-authorize.  (sed flips every 'false' to 'true',
        # presumably setting the dirty markers.)
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Deauthorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group)

        # recovery should have restored the single-subvolume metadata state
        auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
        self.assertEqual(auth_metadata_content, expected_auth_metadata_content)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2981
    def test_update_old_style_auth_metadata_to_new_during_authorize(self):
        """
        CephVolumeClient stores the subvolume data in auth metadata file with
        'volumes' key as there was no subvolume namespace. It doesn't makes sense
        with mgr/volumes. This test validates the transparent update of 'volumes'
        key to 'subvolumes' key in auth metadata file during authorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is
        # created on authorizing 'guest1' access to the subvolume1.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' to 'volumes', old style auth-metadata file
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # After the update, the metadata must use the 'subvolumes' key and
        # list both authorized subvolumes.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                },
                "{0}/{1}".format(group,subvolume2): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        # The on-disk version may be newer than the expected one, so check
        # it with >= and then drop it so the rest compares exactly.
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3056
    def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
        """
        CephVolumeClient stores the subvolume data in auth metadata file with
        'volumes' key as there was no subvolume namespace. It doesn't make sense
        with mgr/volumes. This test validates the transparent update of 'volumes'
        key to 'subvolumes' key in auth metadata file during deauthorize.
        """

        guest_mount = self.mount_b

        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)

        # Authorize 'guestclient_1' to access the subvolume1.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Authorize 'guestclient_1' to access the subvolume2.
        self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
                     "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

        # Check that auth metadata file for auth ID 'guest1', is created.
        # Auth metadata files are stored as "$<auth_id>.meta" under "volumes".
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))

        # Replace 'subvolumes' to 'volumes', old style auth-metadata file
        guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)

        # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)

        # Only subvolume1 should remain in the metadata, keyed under the
        # modern 'subvolumes' section.
        expected_auth_metadata = {
            "version": 5,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "{0}/{1}".format(group,subvolume1): {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))

        # compare everything except "version", which may be newer than the
        # expected baseline; it only needs to be >= the expected value
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # clean up
        self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
        guest_mount.umount_wait()
        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    def test_subvolume_evict_client(self):
        """
        That a subvolume client can be evicted based on the auth ID

        Two guest mounts share one auth ID but mount different subvolumes;
        evicting the auth ID against one subvolume must blocklist only the
        client mounted on that subvolume, leaving the other unaffected.
        """

        subvolumes = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
        for i in range(0, 2):
            self.mounts[i].umount_wait()
        guest_mounts = (self.mounts[0], self.mounts[1])
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create two subvolumes. Authorize 'guest' auth ID to mount the two
        # subvolumes. Mount the two subvolumes. Write data to the volumes.
        for i in range(2):
            # Create subvolume.
            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")

            # authorize guest authID read-write access to subvolume
            # (the command output is the cephx secret key for the guest)
            key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
                               "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])

            mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
                                      "--group_name", group).rstrip()
            # configure credentials for guest client
            self._configure_guest_auth(guest_mounts[i], auth_id, key)

            # mount the subvolume, and write to it
            guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
        # one volume.
        self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)

        # Evicted guest client, guest_mounts[0], should not be able to do
        # anymore metadata ops. It should start failing all operations
        # when it sees that its own address is in the blocklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blocklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        guest_mounts[1].umount_wait()
        for i in range(2):
            self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
            self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3197
3198 def test_subvolume_pin_random(self):
3199 self.fs.set_max_mds(2)
3200 self.fs.wait_for_daemons()
3201 self.config_set('mds', 'mds_export_ephemeral_random', True)
3202
3203 subvolume = self._generate_random_subvolume_name()
3204 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3205 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
3206 # no verification
3207
3208 # remove subvolume
3209 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3210
3211 # verify trash dir is clean
3212 self._wait_for_trash_empty()
3213
3214 def test_subvolume_resize_fail_invalid_size(self):
3215 """
3216 That a subvolume cannot be resized to an invalid size and the quota did not change
3217 """
3218
3219 osize = self.DEFAULT_FILE_SIZE*1024*1024
3220 # create subvolume
3221 subvolname = self._generate_random_subvolume_name()
3222 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3223
3224 # make sure it exists
3225 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3226 self.assertNotEqual(subvolpath, None)
3227
3228 # try to resize the subvolume with an invalid size -10
3229 nsize = -10
3230 try:
3231 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3232 except CommandFailedError as ce:
3233 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3234 else:
3235 self.fail("expected the 'fs subvolume resize' command to fail")
3236
3237 # verify the quota did not change
3238 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3239 self.assertEqual(size, osize)
3240
3241 # remove subvolume
3242 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3243
3244 # verify trash dir is clean
3245 self._wait_for_trash_empty()
3246
3247 def test_subvolume_resize_fail_zero_size(self):
3248 """
3249 That a subvolume cannot be resized to a zero size and the quota did not change
3250 """
3251
3252 osize = self.DEFAULT_FILE_SIZE*1024*1024
3253 # create subvolume
3254 subvolname = self._generate_random_subvolume_name()
3255 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3256
3257 # make sure it exists
3258 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3259 self.assertNotEqual(subvolpath, None)
3260
3261 # try to resize the subvolume with size 0
3262 nsize = 0
3263 try:
3264 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3265 except CommandFailedError as ce:
3266 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3267 else:
3268 self.fail("expected the 'fs subvolume resize' command to fail")
3269
3270 # verify the quota did not change
3271 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3272 self.assertEqual(size, osize)
3273
3274 # remove subvolume
3275 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3276
3277 # verify trash dir is clean
3278 self._wait_for_trash_empty()
3279
3280 def test_subvolume_resize_quota_lt_used_size(self):
3281 """
3282 That a subvolume can be resized to a size smaller than the current used size
3283 and the resulting quota matches the expected size.
3284 """
3285
3286 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
3287 # create subvolume
3288 subvolname = self._generate_random_subvolume_name()
3289 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3290
3291 # make sure it exists
3292 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3293 self.assertNotEqual(subvolpath, None)
3294
3295 # create one file of 10MB
3296 file_size=self.DEFAULT_FILE_SIZE*10
3297 number_of_files=1
3298 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3299 number_of_files,
3300 file_size))
3301 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
3302 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3303
3304 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
3305 susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
3306 if isinstance(self.mount_a, FuseMount):
3307 # kclient dir does not have size==rbytes
3308 self.assertEqual(usedsize, susedsize)
3309
3310 # shrink the subvolume
3311 nsize = usedsize // 2
3312 try:
3313 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3314 except CommandFailedError:
3315 self.fail("expected the 'fs subvolume resize' command to succeed")
3316
3317 # verify the quota
3318 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3319 self.assertEqual(size, nsize)
3320
3321 # remove subvolume
3322 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3323
3324 # verify trash dir is clean
3325 self._wait_for_trash_empty()
3326
3327 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
3328 """
3329 That a subvolume cannot be resized to a size smaller than the current used size
3330 when --no_shrink is given and the quota did not change.
3331 """
3332
3333 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
3334 # create subvolume
3335 subvolname = self._generate_random_subvolume_name()
3336 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3337
3338 # make sure it exists
3339 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3340 self.assertNotEqual(subvolpath, None)
3341
3342 # create one file of 10MB
3343 file_size=self.DEFAULT_FILE_SIZE*10
3344 number_of_files=1
3345 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3346 number_of_files,
3347 file_size))
3348 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
3349 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3350
3351 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
3352 susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
3353 if isinstance(self.mount_a, FuseMount):
3354 # kclient dir does not have size==rbytes
3355 self.assertEqual(usedsize, susedsize)
3356
3357 # shrink the subvolume
3358 nsize = usedsize // 2
3359 try:
3360 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
3361 except CommandFailedError as ce:
3362 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3363 else:
3364 self.fail("expected the 'fs subvolume resize' command to fail")
3365
3366 # verify the quota did not change
3367 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3368 self.assertEqual(size, osize)
3369
3370 # remove subvolume
3371 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3372
3373 # verify trash dir is clean
3374 self._wait_for_trash_empty()
3375
3376 def test_subvolume_resize_expand_on_full_subvolume(self):
3377 """
3378 That the subvolume can be expanded from a full subvolume and future writes succeed.
3379 """
3380
3381 osize = self.DEFAULT_FILE_SIZE*1024*1024*10
3382 # create subvolume of quota 10MB and make sure it exists
3383 subvolname = self._generate_random_subvolume_name()
3384 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3385 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3386 self.assertNotEqual(subvolpath, None)
3387
3388 # create one file of size 10MB and write
3389 file_size=self.DEFAULT_FILE_SIZE*10
3390 number_of_files=1
3391 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3392 number_of_files,
3393 file_size))
3394 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
3395 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3396
3397 # create a file of size 5MB and try write more
3398 file_size=file_size // 2
3399 number_of_files=1
3400 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3401 number_of_files,
3402 file_size))
3403 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
3404 try:
3405 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3406 except CommandFailedError:
3407 # Not able to write. So expand the subvolume more and try writing the 5MB file again
3408 nsize = osize*2
3409 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3410 try:
3411 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3412 except CommandFailedError:
3413 self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3414 "to succeed".format(subvolname, number_of_files, file_size))
3415 else:
3416 self.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3417 "to fail".format(subvolname, number_of_files, file_size))
3418
3419 # remove subvolume
3420 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3421
3422 # verify trash dir is clean
3423 self._wait_for_trash_empty()
3424
3425 def test_subvolume_resize_infinite_size(self):
3426 """
3427 That a subvolume can be resized to an infinite size by unsetting its quota.
3428 """
3429
3430 # create subvolume
3431 subvolname = self._generate_random_subvolume_name()
3432 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
3433 str(self.DEFAULT_FILE_SIZE*1024*1024))
3434
3435 # make sure it exists
3436 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3437 self.assertNotEqual(subvolpath, None)
3438
3439 # resize inf
3440 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
3441
3442 # verify that the quota is None
3443 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
3444 self.assertEqual(size, None)
3445
3446 # remove subvolume
3447 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3448
3449 # verify trash dir is clean
3450 self._wait_for_trash_empty()
3451
3452 def test_subvolume_resize_infinite_size_future_writes(self):
3453 """
3454 That a subvolume can be resized to an infinite size and the future writes succeed.
3455 """
3456
3457 # create subvolume
3458 subvolname = self._generate_random_subvolume_name()
3459 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
3460 str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")
3461
3462 # make sure it exists
3463 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3464 self.assertNotEqual(subvolpath, None)
3465
3466 # resize inf
3467 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
3468
3469 # verify that the quota is None
3470 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
3471 self.assertEqual(size, None)
3472
3473 # create one file of 10MB and try to write
3474 file_size=self.DEFAULT_FILE_SIZE*10
3475 number_of_files=1
3476 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3477 number_of_files,
3478 file_size))
3479 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
3480
3481 try:
3482 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3483 except CommandFailedError:
3484 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3485 "to succeed".format(subvolname, number_of_files, file_size))
3486
3487 # remove subvolume
3488 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3489
3490 # verify trash dir is clean
3491 self._wait_for_trash_empty()
3492
3493 def test_subvolume_rm_force(self):
3494 # test removing non-existing subvolume with --force
3495 subvolume = self._generate_random_subvolume_name()
3496 try:
3497 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
3498 except CommandFailedError:
3499 self.fail("expected the 'fs subvolume rm --force' command to succeed")
3500
3501 def test_subvolume_exists_with_subvolumegroup_and_subvolume(self):
3502 """Test the presence of any subvolume by specifying the name of subvolumegroup"""
3503
3504 group = self._generate_random_group_name()
3505 subvolume1 = self._generate_random_subvolume_name()
3506 # create subvolumegroup
3507 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3508 # create subvolume in group
3509 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
3510 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3511 self.assertEqual(ret.strip('\n'), "subvolume exists")
3512 # delete subvolume in group
3513 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3514 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3515 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3516 # delete subvolumegroup
3517 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3518
3519 def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self):
3520 """Test the presence of any subvolume specifying the name
3521 of subvolumegroup and no subvolumes"""
3522
3523 group = self._generate_random_group_name()
3524 # create subvolumegroup
3525 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3526 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3527 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3528 # delete subvolumegroup
3529 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3530
3531 def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self):
3532 """Test the presence of any subvolume without specifying the name
3533 of subvolumegroup"""
3534
3535 subvolume1 = self._generate_random_subvolume_name()
3536 # create subvolume
3537 self._fs_cmd("subvolume", "create", self.volname, subvolume1)
3538 ret = self._fs_cmd("subvolume", "exist", self.volname)
3539 self.assertEqual(ret.strip('\n'), "subvolume exists")
3540 # delete subvolume
3541 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
3542 ret = self._fs_cmd("subvolume", "exist", self.volname)
3543 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3544
3545 def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self):
3546 """Test the presence of any subvolume without any subvolumegroup
3547 and without any subvolume"""
3548
3549 ret = self._fs_cmd("subvolume", "exist", self.volname)
3550 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3551
3552 def test_subvolume_shrink(self):
3553 """
3554 That a subvolume can be shrinked in size and its quota matches the expected size.
3555 """
3556
3557 # create subvolume
3558 subvolname = self._generate_random_subvolume_name()
3559 osize = self.DEFAULT_FILE_SIZE*1024*1024
3560 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3561
3562 # make sure it exists
3563 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3564 self.assertNotEqual(subvolpath, None)
3565
3566 # shrink the subvolume
3567 nsize = osize // 2
3568 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3569
3570 # verify the quota
3571 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3572 self.assertEqual(size, nsize)
3573
3574 # remove subvolume
3575 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3576
3577 # verify trash dir is clean
3578 self._wait_for_trash_empty()
3579
3580 def test_subvolume_retain_snapshot_rm_idempotency(self):
3581 """
3582 ensure subvolume deletion of a subvolume which is already deleted with retain snapshots option passes.
3583 After subvolume deletion with retain snapshots, the subvolume exists until the trash directory (resides inside subvolume)
3584 is cleaned up. The subvolume deletion issued while the trash directory is not empty, should pass and should
3585 not error out with EAGAIN.
3586 """
3587 subvolume = self._generate_random_subvolume_name()
3588 snapshot = self._generate_random_snapshot_name()
3589
3590 # create subvolume
3591 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
3592
3593 # do some IO
3594 self._do_subvolume_io(subvolume, number_of_files=256)
3595
3596 # snapshot subvolume
3597 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3598
3599 # remove with snapshot retention
3600 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
3601
3602 # remove snapshots (removes retained volume)
3603 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3604
3605 # remove subvolume (check idempotency)
3606 try:
3607 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3608 except CommandFailedError as ce:
3609 if ce.exitstatus != errno.ENOENT:
3610 self.fail(f"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
3611
3612 # verify trash dir is clean
3613 self._wait_for_trash_empty()
3614
3615
3616 def test_subvolume_user_metadata_set(self):
3617 subvolname = self._generate_random_subvolume_name()
3618 group = self._generate_random_group_name()
3619
3620 # create group.
3621 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3622
3623 # create subvolume in group.
3624 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3625
3626 # set metadata for subvolume.
3627 key = "key"
3628 value = "value"
3629 try:
3630 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3631 except CommandFailedError:
3632 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3633
3634 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3635 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3636
3637 # verify trash dir is clean.
3638 self._wait_for_trash_empty()
3639
3640 def test_subvolume_user_metadata_set_idempotence(self):
3641 subvolname = self._generate_random_subvolume_name()
3642 group = self._generate_random_group_name()
3643
3644 # create group.
3645 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3646
3647 # create subvolume in group.
3648 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3649
3650 # set metadata for subvolume.
3651 key = "key"
3652 value = "value"
3653 try:
3654 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3655 except CommandFailedError:
3656 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3657
3658 # set same metadata again for subvolume.
3659 try:
3660 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3661 except CommandFailedError:
3662 self.fail("expected the 'fs subvolume metadata set' command to succeed because it is idempotent operation")
3663
3664 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3665 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3666
3667 # verify trash dir is clean.
3668 self._wait_for_trash_empty()
3669
3670 def test_subvolume_user_metadata_get(self):
3671 subvolname = self._generate_random_subvolume_name()
3672 group = self._generate_random_group_name()
3673
3674 # create group.
3675 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3676
3677 # create subvolume in group.
3678 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3679
3680 # set metadata for subvolume.
3681 key = "key"
3682 value = "value"
3683 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3684
3685 # get value for specified key.
3686 try:
3687 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3688 except CommandFailedError:
3689 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3690
3691 # remove '\n' from returned value.
3692 ret = ret.strip('\n')
3693
3694 # match received value with expected value.
3695 self.assertEqual(value, ret)
3696
3697 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3698 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3699
3700 # verify trash dir is clean.
3701 self._wait_for_trash_empty()
3702
3703 def test_subvolume_user_metadata_get_for_nonexisting_key(self):
3704 subvolname = self._generate_random_subvolume_name()
3705 group = self._generate_random_group_name()
3706
3707 # create group.
3708 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3709
3710 # create subvolume in group.
3711 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3712
3713 # set metadata for subvolume.
3714 key = "key"
3715 value = "value"
3716 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3717
3718 # try to get value for nonexisting key
3719 # Expecting ENOENT exit status because key does not exist
3720 try:
3721 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_nonexist", "--group_name", group)
3722 except CommandFailedError as e:
3723 self.assertEqual(e.exitstatus, errno.ENOENT)
3724 else:
3725 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
3726
3727 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3728 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3729
3730 # verify trash dir is clean.
3731 self._wait_for_trash_empty()
3732
3733 def test_subvolume_user_metadata_get_for_nonexisting_section(self):
3734 subvolname = self._generate_random_subvolume_name()
3735 group = self._generate_random_group_name()
3736
3737 # create group.
3738 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3739
3740 # create subvolume in group.
3741 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3742
3743 # try to get value for nonexisting key (as section does not exist)
3744 # Expecting ENOENT exit status because key does not exist
3745 try:
3746 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key", "--group_name", group)
3747 except CommandFailedError as e:
3748 self.assertEqual(e.exitstatus, errno.ENOENT)
3749 else:
3750 self.fail("Expected ENOENT because section does not exist")
3751
3752 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3753 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3754
3755 # verify trash dir is clean.
3756 self._wait_for_trash_empty()
3757
3758 def test_subvolume_user_metadata_update(self):
3759 subvolname = self._generate_random_subvolume_name()
3760 group = self._generate_random_group_name()
3761
3762 # create group.
3763 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3764
3765 # create subvolume in group.
3766 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3767
3768 # set metadata for subvolume.
3769 key = "key"
3770 value = "value"
3771 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3772
3773 # update metadata against key.
3774 new_value = "new_value"
3775 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, new_value, "--group_name", group)
3776
3777 # get metadata for specified key of subvolume.
3778 try:
3779 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3780 except CommandFailedError:
3781 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3782
3783 # remove '\n' from returned value.
3784 ret = ret.strip('\n')
3785
3786 # match received value with expected value.
3787 self.assertEqual(new_value, ret)
3788
3789 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3790 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3791
3792 # verify trash dir is clean.
3793 self._wait_for_trash_empty()
3794
3795 def test_subvolume_user_metadata_list(self):
3796 subvolname = self._generate_random_subvolume_name()
3797 group = self._generate_random_group_name()
3798
3799 # create group.
3800 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3801
3802 # create subvolume in group.
3803 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3804
3805 # set metadata for subvolume.
3806 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
3807
3808 for k, v in input_metadata_dict.items():
3809 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)
3810
3811 # list metadata
3812 try:
3813 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
3814 except CommandFailedError:
3815 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
3816
3817 ret_dict = json.loads(ret)
3818
3819 # compare output with expected output
3820 self.assertDictEqual(input_metadata_dict, ret_dict)
3821
3822 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3823 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3824
3825 # verify trash dir is clean.
3826 self._wait_for_trash_empty()
3827
3828 def test_subvolume_user_metadata_list_if_no_metadata_set(self):
3829 subvolname = self._generate_random_subvolume_name()
3830 group = self._generate_random_group_name()
3831
3832 # create group.
3833 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3834
3835 # create subvolume in group.
3836 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3837
3838 # list metadata
3839 try:
3840 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
3841 except CommandFailedError:
3842 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
3843
3844 # remove '\n' from returned value.
3845 ret = ret.strip('\n')
3846
3847 # compare output with expected output
3848 # expecting empty json/dictionary
3849 self.assertEqual(ret, "{}")
3850
3851 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3852 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3853
3854 # verify trash dir is clean.
3855 self._wait_for_trash_empty()
3856
3857 def test_subvolume_user_metadata_remove(self):
3858 subvolname = self._generate_random_subvolume_name()
3859 group = self._generate_random_group_name()
3860
3861 # create group.
3862 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3863
3864 # create subvolume in group.
3865 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3866
3867 # set metadata for subvolume.
3868 key = "key"
3869 value = "value"
3870 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3871
3872 # remove metadata against specified key.
3873 try:
3874 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
3875 except CommandFailedError:
3876 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3877
3878 # confirm key is removed by again fetching metadata
3879 try:
3880 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3881 except CommandFailedError as e:
3882 self.assertEqual(e.exitstatus, errno.ENOENT)
3883 else:
3884 self.fail("Expected ENOENT because key does not exist")
3885
3886 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3887 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3888
3889 # verify trash dir is clean.
3890 self._wait_for_trash_empty()
3891
3892 def test_subvolume_user_metadata_remove_for_nonexisting_key(self):
3893 subvolname = self._generate_random_subvolume_name()
3894 group = self._generate_random_group_name()
3895
3896 # create group.
3897 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3898
3899 # create subvolume in group.
3900 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3901
3902 # set metadata for subvolume.
3903 key = "key"
3904 value = "value"
3905 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3906
3907 # try to remove value for nonexisting key
3908 # Expecting ENOENT exit status because key does not exist
3909 try:
3910 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_nonexist", "--group_name", group)
3911 except CommandFailedError as e:
3912 self.assertEqual(e.exitstatus, errno.ENOENT)
3913 else:
3914 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
3915
3916 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3917 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3918
3919 # verify trash dir is clean.
3920 self._wait_for_trash_empty()
3921
3922 def test_subvolume_user_metadata_remove_for_nonexisting_section(self):
3923 subvolname = self._generate_random_subvolume_name()
3924 group = self._generate_random_group_name()
3925
3926 # create group.
3927 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3928
3929 # create subvolume in group.
3930 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3931
3932 # try to remove value for nonexisting key (as section does not exist)
3933 # Expecting ENOENT exit status because key does not exist
3934 try:
3935 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key", "--group_name", group)
3936 except CommandFailedError as e:
3937 self.assertEqual(e.exitstatus, errno.ENOENT)
3938 else:
3939 self.fail("Expected ENOENT because section does not exist")
3940
3941 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3942 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3943
3944 # verify trash dir is clean.
3945 self._wait_for_trash_empty()
3946
3947 def test_subvolume_user_metadata_remove_force(self):
3948 subvolname = self._generate_random_subvolume_name()
3949 group = self._generate_random_group_name()
3950
3951 # create group.
3952 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3953
3954 # create subvolume in group.
3955 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3956
3957 # set metadata for subvolume.
3958 key = "key"
3959 value = "value"
3960 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3961
3962 # remove metadata against specified key with --force option.
3963 try:
3964 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
3965 except CommandFailedError:
3966 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3967
3968 # confirm key is removed by again fetching metadata
3969 try:
3970 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3971 except CommandFailedError as e:
3972 self.assertEqual(e.exitstatus, errno.ENOENT)
3973 else:
3974 self.fail("Expected ENOENT because key does not exist")
3975
3976 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3977 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3978
3979 # verify trash dir is clean.
3980 self._wait_for_trash_empty()
3981
3982 def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self):
3983 subvolname = self._generate_random_subvolume_name()
3984 group = self._generate_random_group_name()
3985
3986 # create group.
3987 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3988
3989 # create subvolume in group.
3990 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3991
3992 # set metadata for subvolume.
3993 key = "key"
3994 value = "value"
3995 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3996
3997 # remove metadata against specified key.
3998 try:
3999 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
4000 except CommandFailedError:
4001 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
4002
4003 # confirm key is removed by again fetching metadata
4004 try:
4005 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
4006 except CommandFailedError as e:
4007 self.assertEqual(e.exitstatus, errno.ENOENT)
4008 else:
4009 self.fail("Expected ENOENT because key does not exist")
4010
4011 # again remove metadata against already removed key with --force option.
4012 try:
4013 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
4014 except CommandFailedError:
4015 self.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")
4016
4017 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4018 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4019
4020 # verify trash dir is clean.
4021 self._wait_for_trash_empty()
4022
4023 def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self):
4024 subvolname = self._generate_random_subvolume_name()
4025 group = self._generate_random_group_name()
4026
4027 # emulate a old-fashioned subvolume in a custom group
4028 createpath = os.path.join(".", "volumes", group, subvolname)
4029 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
4030
4031 # set metadata for subvolume.
4032 key = "key"
4033 value = "value"
4034 try:
4035 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
4036 except CommandFailedError:
4037 self.fail("expected the 'fs subvolume metadata set' command to succeed")
4038
4039 # get value for specified key.
4040 try:
4041 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
4042 except CommandFailedError:
4043 self.fail("expected the 'fs subvolume metadata get' command to succeed")
4044
4045 # remove '\n' from returned value.
4046 ret = ret.strip('\n')
4047
4048 # match received value with expected value.
4049 self.assertEqual(value, ret)
4050
4051 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4052 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4053
4054 # verify trash dir is clean.
4055 self._wait_for_trash_empty()
4056
4057 def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self):
4058 subvolname = self._generate_random_subvolume_name()
4059 group = self._generate_random_group_name()
4060
4061 # emulate a old-fashioned subvolume in a custom group
4062 createpath = os.path.join(".", "volumes", group, subvolname)
4063 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
4064
4065 # set metadata for subvolume.
4066 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
4067
4068 for k, v in input_metadata_dict.items():
4069 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)
4070
4071 # list metadata
4072 try:
4073 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
4074 except CommandFailedError:
4075 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
4076
4077 ret_dict = json.loads(ret)
4078
4079 # compare output with expected output
4080 self.assertDictEqual(input_metadata_dict, ret_dict)
4081
4082 # remove metadata against specified key.
4083 try:
4084 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_1", "--group_name", group)
4085 except CommandFailedError:
4086 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
4087
4088 # confirm key is removed by again fetching metadata
4089 try:
4090 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_1", "--group_name", group)
4091 except CommandFailedError as e:
4092 self.assertEqual(e.exitstatus, errno.ENOENT)
4093 else:
4094 self.fail("Expected ENOENT because key_1 does not exist")
4095
4096 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4097 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4098
4099 # verify trash dir is clean.
4100 self._wait_for_trash_empty()
4101
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations.

    Most of these tests are skipped because subvolume group snapshots are
    currently unsupported; test_subvolume_group_snapshot_unsupported_status
    verifies the command reports ENOSYS.
    """
    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        """Removing an already-removed group snapshot must fail with ENOENT."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove snapshot again; only ENOENT is acceptable
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            # self.fail (not RuntimeError) so this counts as a test failure,
            # consistent with the rest of the suite
            self.fail("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        """A group snapshot can be created and then removed."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        """Creating a group snapshot twice with the same name is idempotent."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        """'fs subvolumegroup snapshot ls' lists exactly the created snapshots."""
        snapshots = []

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            # self.fail (not RuntimeError) for consistency with the rest of the suite
            self.fail("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                self.fail("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        """Removing a non-existent group snapshot with --force must succeed."""
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        # remove snapshot
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            # self.fail (not RuntimeError) for consistency with the rest of the suite
            self.fail("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        """Group snapshot creation is unsupported and must fail with ENOSYS."""
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot group
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4248
4249
4250 class TestSubvolumeSnapshots(TestVolumesHelper):
4251 """Tests for FS subvolume snapshot operations."""
4252 def test_nonexistent_subvolume_snapshot_rm(self):
4253 subvolume = self._generate_random_subvolume_name()
4254 snapshot = self._generate_random_snapshot_name()
4255
4256 # create subvolume
4257 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4258
4259 # snapshot subvolume
4260 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4261
4262 # remove snapshot
4263 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4264
4265 # remove snapshot again
4266 try:
4267 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4268 except CommandFailedError as ce:
4269 if ce.exitstatus != errno.ENOENT:
4270 raise
4271 else:
4272 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
4273
4274 # remove subvolume
4275 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4276
4277 # verify trash dir is clean
4278 self._wait_for_trash_empty()
4279
4280 def test_subvolume_snapshot_create_and_rm(self):
4281 subvolume = self._generate_random_subvolume_name()
4282 snapshot = self._generate_random_snapshot_name()
4283
4284 # create subvolume
4285 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4286
4287 # snapshot subvolume
4288 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4289
4290 # remove snapshot
4291 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4292
4293 # remove subvolume
4294 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4295
4296 # verify trash dir is clean
4297 self._wait_for_trash_empty()
4298
4299 def test_subvolume_snapshot_create_idempotence(self):
4300 subvolume = self._generate_random_subvolume_name()
4301 snapshot = self._generate_random_snapshot_name()
4302
4303 # create subvolume
4304 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4305
4306 # snapshot subvolume
4307 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4308
4309 # try creating w/ same subvolume snapshot name -- should be idempotent
4310 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4311
4312 # remove snapshot
4313 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4314
4315 # remove subvolume
4316 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4317
4318 # verify trash dir is clean
4319 self._wait_for_trash_empty()
4320
4321 def test_subvolume_snapshot_info(self):
4322
4323 """
4324 tests the 'fs subvolume snapshot info' command
4325 """
4326
4327 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4328
4329 subvolume = self._generate_random_subvolume_name()
4330 snapshot, snap_missing = self._generate_random_snapshot_name(2)
4331
4332 # create subvolume
4333 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
4334
4335 # do some IO
4336 self._do_subvolume_io(subvolume, number_of_files=1)
4337
4338 # snapshot subvolume
4339 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4340
4341 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
4342 for md in snap_md:
4343 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4344 self.assertEqual(snap_info["has_pending_clones"], "no")
4345
4346 # snapshot info for non-existent snapshot
4347 try:
4348 self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
4349 except CommandFailedError as ce:
4350 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
4351 else:
4352 self.fail("expected snapshot info of non-existent snapshot to fail")
4353
4354 # remove snapshot
4355 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4356
4357 # remove subvolume
4358 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4359
4360 # verify trash dir is clean
4361 self._wait_for_trash_empty()
4362
4363 def test_subvolume_snapshot_in_group(self):
4364 subvolume = self._generate_random_subvolume_name()
4365 group = self._generate_random_group_name()
4366 snapshot = self._generate_random_snapshot_name()
4367
4368 # create group
4369 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4370
4371 # create subvolume in group
4372 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4373
4374 # snapshot subvolume in group
4375 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
4376
4377 # remove snapshot
4378 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
4379
4380 # remove subvolume
4381 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4382
4383 # verify trash dir is clean
4384 self._wait_for_trash_empty()
4385
4386 # remove group
4387 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4388
4389 def test_subvolume_snapshot_ls(self):
4390 # tests the 'fs subvolume snapshot ls' command
4391
4392 snapshots = []
4393
4394 # create subvolume
4395 subvolume = self._generate_random_subvolume_name()
4396 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4397
4398 # create subvolume snapshots
4399 snapshots = self._generate_random_snapshot_name(3)
4400 for snapshot in snapshots:
4401 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4402
4403 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
4404 if len(subvolsnapshotls) == 0:
4405 self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
4406 else:
4407 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
4408 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
4409 self.fail("Error creating or listing subvolume snapshots")
4410
4411 # remove snapshot
4412 for snapshot in snapshots:
4413 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4414
4415 # remove subvolume
4416 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4417
4418 # verify trash dir is clean
4419 self._wait_for_trash_empty()
4420
4421 def test_subvolume_inherited_snapshot_ls(self):
4422 # tests the scenario where 'fs subvolume snapshot ls' command
4423 # should not list inherited snapshots created as part of snapshot
4424 # at ancestral level
4425
4426 snapshots = []
4427 subvolume = self._generate_random_subvolume_name()
4428 group = self._generate_random_group_name()
4429 snap_count = 3
4430
4431 # create group
4432 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4433
4434 # create subvolume in group
4435 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4436
4437 # create subvolume snapshots
4438 snapshots = self._generate_random_snapshot_name(snap_count)
4439 for snapshot in snapshots:
4440 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
4441
4442 # Create snapshot at ancestral level
4443 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
4444 ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
4445 self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
4446
4447 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
4448 self.assertEqual(len(subvolsnapshotls), snap_count)
4449
4450 # remove ancestral snapshots
4451 self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
4452
4453 # remove snapshot
4454 for snapshot in snapshots:
4455 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
4456
4457 # remove subvolume
4458 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4459
4460 # verify trash dir is clean
4461 self._wait_for_trash_empty()
4462
4463 # remove group
4464 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4465
4466 def test_subvolume_inherited_snapshot_info(self):
4467 """
4468 tests the scenario where 'fs subvolume snapshot info' command
4469 should fail for inherited snapshots created as part of snapshot
4470 at ancestral level
4471 """
4472
4473 subvolume = self._generate_random_subvolume_name()
4474 group = self._generate_random_group_name()
4475
4476 # create group
4477 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4478
4479 # create subvolume in group
4480 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4481
4482 # Create snapshot at ancestral level
4483 ancestral_snap_name = "ancestral_snap_1"
4484 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
4485 self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
4486
4487 # Validate existence of inherited snapshot
4488 group_path = os.path.join(".", "volumes", group)
4489 inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
4490 inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
4491 inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap)
4492 self.mount_a.run_shell(['ls', inherited_snappath])
4493
4494 # snapshot info on inherited snapshot
4495 try:
4496 self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
4497 except CommandFailedError as ce:
4498 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
4499 else:
4500 self.fail("expected snapshot info of inherited snapshot to fail")
4501
4502 # remove ancestral snapshots
4503 self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
4504
4505 # remove subvolume
4506 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
4507
4508 # verify trash dir is clean
4509 self._wait_for_trash_empty()
4510
4511 # remove group
4512 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4513
4514 def test_subvolume_inherited_snapshot_rm(self):
4515 """
4516 tests the scenario where 'fs subvolume snapshot rm' command
4517 should fail for inherited snapshots created as part of snapshot
4518 at ancestral level
4519 """
4520
4521 subvolume = self._generate_random_subvolume_name()
4522 group = self._generate_random_group_name()
4523
4524 # create group
4525 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4526
4527 # create subvolume in group
4528 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4529
4530 # Create snapshot at ancestral level
4531 ancestral_snap_name = "ancestral_snap_1"
4532 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
4533 self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
4534
4535 # Validate existence of inherited snap
4536 group_path = os.path.join(".", "volumes", group)
4537 inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
4538 inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
4539 inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap)
4540 self.mount_a.run_shell(['ls', inherited_snappath])
4541
4542 # inherited snapshot should not be deletable
4543 try:
4544 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
4545 except CommandFailedError as ce:
4546 self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
4547 else:
4548 self.fail("expected removing inheirted snapshot to fail")
4549
4550 # remove ancestral snapshots
4551 self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
4552
4553 # remove subvolume
4554 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4555
4556 # verify trash dir is clean
4557 self._wait_for_trash_empty()
4558
4559 # remove group
4560 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4561
4562 def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
4563 """
4564 tests the scenario where creation of subvolume snapshot name
4565 with same name as it's subvolumegroup snapshot name. This should
4566 fail.
4567 """
4568
4569 subvolume = self._generate_random_subvolume_name()
4570 group = self._generate_random_group_name()
4571 group_snapshot = self._generate_random_snapshot_name()
4572
4573 # create group
4574 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4575
4576 # create subvolume in group
4577 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4578
4579 # Create subvolumegroup snapshot
4580 group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
4581 self.mount_a.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path], omit_sudo=False)
4582
4583 # Validate existence of subvolumegroup snapshot
4584 self.mount_a.run_shell(['ls', group_snapshot_path])
4585
4586 # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail
4587 try:
4588 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
4589 except CommandFailedError as ce:
4590 self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
4591 else:
4592 self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
4593
4594 # remove subvolumegroup snapshot
4595 self.mount_a.run_shell(['sudo', 'rmdir', group_snapshot_path], omit_sudo=False)
4596
4597 # remove subvolume
4598 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4599
4600 # verify trash dir is clean
4601 self._wait_for_trash_empty()
4602
4603 # remove group
4604 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4605
4606 def test_subvolume_retain_snapshot_invalid_recreate(self):
4607 """
4608 ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
4609 """
4610 subvolume = self._generate_random_subvolume_name()
4611 snapshot = self._generate_random_snapshot_name()
4612
4613 # create subvolume
4614 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4615
4616 # snapshot subvolume
4617 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4618
4619 # remove with snapshot retention
4620 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4621
4622 # recreate subvolume with an invalid pool
4623 data_pool = "invalid_pool"
4624 try:
4625 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
4626 except CommandFailedError as ce:
4627 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
4628 else:
4629 self.fail("expected recreate of subvolume with invalid poolname to fail")
4630
4631 # fetch info
4632 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4633 self.assertEqual(subvol_info["state"], "snapshot-retained",
4634 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
4635
4636 # getpath
4637 try:
4638 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
4639 except CommandFailedError as ce:
4640 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
4641 else:
4642 self.fail("expected getpath of subvolume with retained snapshots to fail")
4643
4644 # remove snapshot (should remove volume)
4645 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4646
4647 # verify trash dir is clean
4648 self._wait_for_trash_empty()
4649
4650 def test_subvolume_retain_snapshot_recreate_subvolume(self):
4651 """
4652 ensure a retained subvolume can be recreated and further snapshotted
4653 """
4654 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4655
4656 subvolume = self._generate_random_subvolume_name()
4657 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
4658
4659 # create subvolume
4660 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4661
4662 # snapshot subvolume
4663 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
4664
4665 # remove with snapshot retention
4666 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4667
4668 # fetch info
4669 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4670 self.assertEqual(subvol_info["state"], "snapshot-retained",
4671 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
4672
4673 # recreate retained subvolume
4674 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4675
4676 # fetch info
4677 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4678 self.assertEqual(subvol_info["state"], "complete",
4679 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
4680
4681 # snapshot info (older snapshot)
4682 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
4683 for md in snap_md:
4684 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4685 self.assertEqual(snap_info["has_pending_clones"], "no")
4686
4687 # snap-create (new snapshot)
4688 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
4689
4690 # remove with retain snapshots
4691 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4692
4693 # list snapshots
4694 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
4695 self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
4696 " created subvolume snapshots")
4697 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
4698 for snap in [snapshot1, snapshot2]:
4699 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
4700
4701 # remove snapshots (should remove volume)
4702 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
4703 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
4704
4705 # verify list subvolumes returns an empty list
4706 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4707 self.assertEqual(len(subvolumels), 0)
4708
4709 # verify trash dir is clean
4710 self._wait_for_trash_empty()
4711
4712 def test_subvolume_retain_snapshot_with_snapshots(self):
4713 """
4714 ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
4715 also test allowed and dis-allowed operations on a retained subvolume
4716 """
4717 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4718
4719 subvolume = self._generate_random_subvolume_name()
4720 snapshot = self._generate_random_snapshot_name()
4721
4722 # create subvolume
4723 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4724
4725 # snapshot subvolume
4726 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4727
4728 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4729 try:
4730 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4731 except CommandFailedError as ce:
4732 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
4733 else:
4734 self.fail("expected rm of subvolume with retained snapshots to fail")
4735
4736 # remove with snapshot retention
4737 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4738
4739 # fetch info
4740 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4741 self.assertEqual(subvol_info["state"], "snapshot-retained",
4742 msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
4743
4744 ## test allowed ops in retained state
4745 # ls
4746 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4747 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
4748 self.assertEqual(subvolumes[0]['name'], subvolume,
4749 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
4750
4751 # snapshot info
4752 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
4753 for md in snap_md:
4754 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4755 self.assertEqual(snap_info["has_pending_clones"], "no")
4756
4757 # rm --force (allowed but should fail)
4758 try:
4759 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
4760 except CommandFailedError as ce:
4761 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
4762 else:
4763 self.fail("expected rm of subvolume with retained snapshots to fail")
4764
4765 # rm (allowed but should fail)
4766 try:
4767 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4768 except CommandFailedError as ce:
4769 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
4770 else:
4771 self.fail("expected rm of subvolume with retained snapshots to fail")
4772
4773 ## test disallowed ops
4774 # getpath
4775 try:
4776 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
4777 except CommandFailedError as ce:
4778 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
4779 else:
4780 self.fail("expected getpath of subvolume with retained snapshots to fail")
4781
4782 # resize
4783 nsize = self.DEFAULT_FILE_SIZE*1024*1024
4784 try:
4785 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
4786 except CommandFailedError as ce:
4787 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
4788 else:
4789 self.fail("expected resize of subvolume with retained snapshots to fail")
4790
4791 # snap-create
4792 try:
4793 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
4794 except CommandFailedError as ce:
4795 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
4796 else:
4797 self.fail("expected snapshot create of subvolume with retained snapshots to fail")
4798
4799 # remove snapshot (should remove volume)
4800 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4801
4802 # verify list subvolumes returns an empty list
4803 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4804 self.assertEqual(len(subvolumels), 0)
4805
4806 # verify trash dir is clean
4807 self._wait_for_trash_empty()
4808
4809 def test_subvolume_retain_snapshot_without_snapshots(self):
4810 """
4811 ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
4812 """
4813 subvolume = self._generate_random_subvolume_name()
4814
4815 # create subvolume
4816 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4817
4818 # remove with snapshot retention (should remove volume, no snapshots to retain)
4819 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4820
4821 # verify list subvolumes returns an empty list
4822 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4823 self.assertEqual(len(subvolumels), 0)
4824
4825 # verify trash dir is clean
4826 self._wait_for_trash_empty()
4827
4828 def test_subvolume_retain_snapshot_trash_busy_recreate(self):
4829 """
4830 ensure retained subvolume recreate fails if its trash is not yet purged
4831 """
4832 subvolume = self._generate_random_subvolume_name()
4833 snapshot = self._generate_random_snapshot_name()
4834
4835 # create subvolume
4836 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4837
4838 # snapshot subvolume
4839 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4840
4841 # remove with snapshot retention
4842 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4843
4844 # fake a trash entry
4845 self._update_fake_trash(subvolume)
4846
4847 # recreate subvolume
4848 try:
4849 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4850 except CommandFailedError as ce:
4851 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
4852 else:
4853 self.fail("expected recreate of subvolume with purge pending to fail")
4854
4855 # clear fake trash entry
4856 self._update_fake_trash(subvolume, create=False)
4857
4858 # recreate subvolume
4859 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4860
4861 # remove snapshot
4862 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4863
4864 # remove subvolume
4865 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4866
4867 # verify trash dir is clean
4868 self._wait_for_trash_empty()
4869
4870 def test_subvolume_rm_with_snapshots(self):
4871 subvolume = self._generate_random_subvolume_name()
4872 snapshot = self._generate_random_snapshot_name()
4873
4874 # create subvolume
4875 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4876
4877 # snapshot subvolume
4878 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4879
4880 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4881 try:
4882 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4883 except CommandFailedError as ce:
4884 if ce.exitstatus != errno.ENOTEMPTY:
4885 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
4886 else:
4887 raise RuntimeError("expected subvolume deletion to fail")
4888
4889 # remove snapshot
4890 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4891
4892 # remove subvolume
4893 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4894
4895 # verify trash dir is clean
4896 self._wait_for_trash_empty()
4897
4898 def test_subvolume_snapshot_protect_unprotect_sanity(self):
4899 """
4900 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
4901 invoking the command does not cause errors, till they are removed from a subsequent release.
4902 """
4903 subvolume = self._generate_random_subvolume_name()
4904 snapshot = self._generate_random_snapshot_name()
4905 clone = self._generate_random_clone_name()
4906
4907 # create subvolume
4908 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
4909
4910 # do some IO
4911 self._do_subvolume_io(subvolume, number_of_files=64)
4912
4913 # snapshot subvolume
4914 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4915
4916 # now, protect snapshot
4917 self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)
4918
4919 # schedule a clone
4920 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
4921
4922 # check clone status
4923 self._wait_for_clone_to_complete(clone)
4924
4925 # now, unprotect snapshot
4926 self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)
4927
4928 # verify clone
4929 self._verify_clone(subvolume, snapshot, clone)
4930
4931 # remove snapshot
4932 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4933
4934 # remove subvolumes
4935 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4936 self._fs_cmd("subvolume", "rm", self.volname, clone)
4937
4938 # verify trash dir is clean
4939 self._wait_for_trash_empty()
4940
4941 def test_subvolume_snapshot_rm_force(self):
4942 # test removing non existing subvolume snapshot with --force
4943 subvolume = self._generate_random_subvolume_name()
4944 snapshot = self._generate_random_snapshot_name()
4945
4946 # remove snapshot
4947 try:
4948 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
4949 except CommandFailedError:
4950 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
4951
4952 def test_subvolume_snapshot_metadata_set(self):
4953 """
4954 Set custom metadata for subvolume snapshot.
4955 """
4956 subvolname = self._generate_random_subvolume_name()
4957 group = self._generate_random_group_name()
4958 snapshot = self._generate_random_snapshot_name()
4959
4960 # create group.
4961 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4962
4963 # create subvolume in group.
4964 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
4965
4966 # snapshot subvolume
4967 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
4968
4969 # set metadata for snapshot.
4970 key = "key"
4971 value = "value"
4972 try:
4973 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
4974 except CommandFailedError:
4975 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
4976
4977 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
4978 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4979 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4980
4981 # verify trash dir is clean.
4982 self._wait_for_trash_empty()
4983
4984 def test_subvolume_snapshot_metadata_set_idempotence(self):
4985 """
4986 Set custom metadata for subvolume snapshot (Idempotency).
4987 """
4988 subvolname = self._generate_random_subvolume_name()
4989 group = self._generate_random_group_name()
4990 snapshot = self._generate_random_snapshot_name()
4991
4992 # create group.
4993 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4994
4995 # create subvolume in group.
4996 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
4997
4998 # snapshot subvolume
4999 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5000
5001 # set metadata for snapshot.
5002 key = "key"
5003 value = "value"
5004 try:
5005 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5006 except CommandFailedError:
5007 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5008
5009 # set same metadata again for subvolume.
5010 try:
5011 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5012 except CommandFailedError:
5013 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is idempotent operation")
5014
5015 # get value for specified key.
5016 try:
5017 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5018 except CommandFailedError:
5019 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5020
5021 # remove '\n' from returned value.
5022 ret = ret.strip('\n')
5023
5024 # match received value with expected value.
5025 self.assertEqual(value, ret)
5026
5027 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5028 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5029 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5030
5031 # verify trash dir is clean.
5032 self._wait_for_trash_empty()
5033
5034 def test_subvolume_snapshot_metadata_get(self):
5035 """
5036 Get custom metadata for a specified key in subvolume snapshot metadata.
5037 """
5038 subvolname = self._generate_random_subvolume_name()
5039 group = self._generate_random_group_name()
5040 snapshot = self._generate_random_snapshot_name()
5041
5042 # create group.
5043 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5044
5045 # create subvolume in group.
5046 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5047
5048 # snapshot subvolume
5049 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5050
5051 # set metadata for snapshot.
5052 key = "key"
5053 value = "value"
5054 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5055
5056 # get value for specified key.
5057 try:
5058 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5059 except CommandFailedError:
5060 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5061
5062 # remove '\n' from returned value.
5063 ret = ret.strip('\n')
5064
5065 # match received value with expected value.
5066 self.assertEqual(value, ret)
5067
5068 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5069 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5070 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5071
5072 # verify trash dir is clean.
5073 self._wait_for_trash_empty()
5074
5075 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self):
5076 """
5077 Get custom metadata for subvolume snapshot if specified key not exist in metadata.
5078 """
5079 subvolname = self._generate_random_subvolume_name()
5080 group = self._generate_random_group_name()
5081 snapshot = self._generate_random_snapshot_name()
5082
5083 # create group.
5084 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5085
5086 # create subvolume in group.
5087 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5088
5089 # snapshot subvolume
5090 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5091
5092 # set metadata for snapshot.
5093 key = "key"
5094 value = "value"
5095 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5096
5097 # try to get value for nonexisting key
5098 # Expecting ENOENT exit status because key does not exist
5099 try:
5100 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key_nonexist", group)
5101 except CommandFailedError as e:
5102 self.assertEqual(e.exitstatus, errno.ENOENT)
5103 else:
5104 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
5105
5106 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5107 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5108 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5109
5110 # verify trash dir is clean.
5111 self._wait_for_trash_empty()
5112
5113 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self):
5114 """
5115 Get custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5116 """
5117 subvolname = self._generate_random_subvolume_name()
5118 group = self._generate_random_group_name()
5119 snapshot = self._generate_random_snapshot_name()
5120
5121 # create group.
5122 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5123
5124 # create subvolume in group.
5125 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5126
5127 # snapshot subvolume
5128 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5129
5130 # try to get value for nonexisting key (as section does not exist)
5131 # Expecting ENOENT exit status because key does not exist
5132 try:
5133 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key", group)
5134 except CommandFailedError as e:
5135 self.assertEqual(e.exitstatus, errno.ENOENT)
5136 else:
5137 self.fail("Expected ENOENT because section does not exist")
5138
5139 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5140 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5141 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5142
5143 # verify trash dir is clean.
5144 self._wait_for_trash_empty()
5145
5146 def test_subvolume_snapshot_metadata_update(self):
5147 """
5148 Update custom metadata for a specified key in subvolume snapshot metadata.
5149 """
5150 subvolname = self._generate_random_subvolume_name()
5151 group = self._generate_random_group_name()
5152 snapshot = self._generate_random_snapshot_name()
5153
5154 # create group.
5155 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5156
5157 # create subvolume in group.
5158 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5159
5160 # snapshot subvolume
5161 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5162
5163 # set metadata for snapshot.
5164 key = "key"
5165 value = "value"
5166 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5167
5168 # update metadata against key.
5169 new_value = "new_value"
5170 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, new_value, group)
5171
5172 # get metadata for specified key of snapshot.
5173 try:
5174 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5175 except CommandFailedError:
5176 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5177
5178 # remove '\n' from returned value.
5179 ret = ret.strip('\n')
5180
5181 # match received value with expected value.
5182 self.assertEqual(new_value, ret)
5183
5184 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5185 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5186 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5187
5188 # verify trash dir is clean.
5189 self._wait_for_trash_empty()
5190
5191 def test_subvolume_snapshot_metadata_list(self):
5192 """
5193 List custom metadata for subvolume snapshot.
5194 """
5195 subvolname = self._generate_random_subvolume_name()
5196 group = self._generate_random_group_name()
5197 snapshot = self._generate_random_snapshot_name()
5198
5199 # create group.
5200 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5201
5202 # create subvolume in group.
5203 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5204
5205 # snapshot subvolume
5206 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5207
5208 # set metadata for subvolume.
5209 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
5210
5211 for k, v in input_metadata_dict.items():
5212 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, k, v, group)
5213
5214 # list metadata
5215 try:
5216 ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
5217 except CommandFailedError:
5218 self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5219
5220 # compare output with expected output
5221 self.assertDictEqual(input_metadata_dict, ret_dict)
5222
5223 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5224 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5225 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5226
5227 # verify trash dir is clean.
5228 self._wait_for_trash_empty()
5229
5230 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self):
5231 """
5232 List custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5233 """
5234 subvolname = self._generate_random_subvolume_name()
5235 group = self._generate_random_group_name()
5236 snapshot = self._generate_random_snapshot_name()
5237
5238 # create group.
5239 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5240
5241 # create subvolume in group.
5242 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5243
5244 # snapshot subvolume
5245 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5246
5247 # list metadata
5248 try:
5249 ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
5250 except CommandFailedError:
5251 self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5252
5253 # compare output with expected output
5254 empty_dict = {}
5255 self.assertDictEqual(ret_dict, empty_dict)
5256
5257 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5258 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5259 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5260
5261 # verify trash dir is clean.
5262 self._wait_for_trash_empty()
5263
5264 def test_subvolume_snapshot_metadata_remove(self):
5265 """
5266 Remove custom metadata for a specified key in subvolume snapshot metadata.
5267 """
5268 subvolname = self._generate_random_subvolume_name()
5269 group = self._generate_random_group_name()
5270 snapshot = self._generate_random_snapshot_name()
5271
5272 # create group.
5273 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5274
5275 # create subvolume in group.
5276 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5277
5278 # snapshot subvolume
5279 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5280
5281 # set metadata for snapshot.
5282 key = "key"
5283 value = "value"
5284 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5285
5286 # remove metadata against specified key.
5287 try:
5288 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
5289 except CommandFailedError:
5290 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5291
5292 # confirm key is removed by again fetching metadata
5293 try:
5294 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, key, snapshot, group)
5295 except CommandFailedError as e:
5296 self.assertEqual(e.exitstatus, errno.ENOENT)
5297 else:
5298 self.fail("Expected ENOENT because key does not exist")
5299
5300 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5301 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5302 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5303
5304 # verify trash dir is clean.
5305 self._wait_for_trash_empty()
5306
5307 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self):
5308 """
5309 Remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5310 """
5311 subvolname = self._generate_random_subvolume_name()
5312 group = self._generate_random_group_name()
5313 snapshot = self._generate_random_snapshot_name()
5314
5315 # create group.
5316 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5317
5318 # create subvolume in group.
5319 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5320
5321 # snapshot subvolume
5322 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5323
5324 # set metadata for snapshot.
5325 key = "key"
5326 value = "value"
5327 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5328
5329 # try to remove value for nonexisting key
5330 # Expecting ENOENT exit status because key does not exist
5331 try:
5332 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key_nonexist", group)
5333 except CommandFailedError as e:
5334 self.assertEqual(e.exitstatus, errno.ENOENT)
5335 else:
5336 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
5337
5338 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5339 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5340 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5341
5342 # verify trash dir is clean.
5343 self._wait_for_trash_empty()
5344
5345 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self):
5346 """
5347 Remove custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5348 """
5349 subvolname = self._generate_random_subvolume_name()
5350 group = self._generate_random_group_name()
5351 snapshot = self._generate_random_snapshot_name()
5352
5353 # create group.
5354 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5355
5356 # create subvolume in group.
5357 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5358
5359 # snapshot subvolume
5360 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5361
5362 # try to remove value for nonexisting key (as section does not exist)
5363 # Expecting ENOENT exit status because key does not exist
5364 try:
5365 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key", group)
5366 except CommandFailedError as e:
5367 self.assertEqual(e.exitstatus, errno.ENOENT)
5368 else:
5369 self.fail("Expected ENOENT because section does not exist")
5370
5371 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5372 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5373 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5374
5375 # verify trash dir is clean.
5376 self._wait_for_trash_empty()
5377
5378 def test_subvolume_snapshot_metadata_remove_force(self):
5379 """
5380 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
5381 """
5382 subvolname = self._generate_random_subvolume_name()
5383 group = self._generate_random_group_name()
5384 snapshot = self._generate_random_snapshot_name()
5385
5386 # create group.
5387 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5388
5389 # create subvolume in group.
5390 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5391
5392 # snapshot subvolume
5393 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5394
5395 # set metadata for snapshot.
5396 key = "key"
5397 value = "value"
5398 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5399
5400 # remove metadata against specified key with --force option.
5401 try:
5402 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
5403 except CommandFailedError:
5404 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5405
5406 # confirm key is removed by again fetching metadata
5407 try:
5408 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5409 except CommandFailedError as e:
5410 self.assertEqual(e.exitstatus, errno.ENOENT)
5411 else:
5412 self.fail("Expected ENOENT because key does not exist")
5413
5414 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5415 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5416 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5417
5418 # verify trash dir is clean.
5419 self._wait_for_trash_empty()
5420
5421 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self):
5422 """
5423 Forcefully remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5424 """
5425 subvolname = self._generate_random_subvolume_name()
5426 group = self._generate_random_group_name()
5427 snapshot = self._generate_random_snapshot_name()
5428
5429 # create group.
5430 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5431
5432 # create subvolume in group.
5433 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5434
5435 # snapshot subvolume
5436 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5437
5438 # set metadata for snapshot.
5439 key = "key"
5440 value = "value"
5441 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5442
5443 # remove metadata against specified key.
5444 try:
5445 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
5446 except CommandFailedError:
5447 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5448
5449 # confirm key is removed by again fetching metadata
5450 try:
5451 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5452 except CommandFailedError as e:
5453 self.assertEqual(e.exitstatus, errno.ENOENT)
5454 else:
5455 self.fail("Expected ENOENT because key does not exist")
5456
5457 # again remove metadata against already removed key with --force option.
5458 try:
5459 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
5460 except CommandFailedError:
5461 self.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
5462
5463 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5464 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5465 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5466
5467 # verify trash dir is clean.
5468 self._wait_for_trash_empty()
5469
5470 def test_subvolume_snapshot_metadata_after_snapshot_remove(self):
5471 """
5472 Verify metadata removal of subvolume snapshot after snapshot removal.
5473 """
5474 subvolname = self._generate_random_subvolume_name()
5475 group = self._generate_random_group_name()
5476 snapshot = self._generate_random_snapshot_name()
5477
5478 # create group.
5479 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5480
5481 # create subvolume in group.
5482 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5483
5484 # snapshot subvolume
5485 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5486
5487 # set metadata for snapshot.
5488 key = "key"
5489 value = "value"
5490 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5491
5492 # get value for specified key.
5493 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5494
5495 # remove '\n' from returned value.
5496 ret = ret.strip('\n')
5497
5498 # match received value with expected value.
5499 self.assertEqual(value, ret)
5500
5501 # remove subvolume snapshot.
5502 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5503
5504 # try to get metadata after removing snapshot.
5505 # Expecting error ENOENT with error message of snapshot does not exist
5506 cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
5507 args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
5508 check_status=False, stdout=StringIO(), stderr=StringIO())
5509 self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
5510 self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
5511 f"Expecting message: snapshot '{snapshot}' does not exist ")
5512
5513 # confirm metadata is removed by searching section name in .meta file
5514 meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
5515 section_name = "SNAP_METADATA_" + snapshot
5516
5517 try:
5518 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5519 except CommandFailedError as e:
5520 self.assertNotEqual(e.exitstatus, 0)
5521 else:
5522 self.fail("Expected non-zero exist status because section should not exist")
5523
5524 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5525 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5526
5527 # verify trash dir is clean.
5528 self._wait_for_trash_empty()
5529
5530 def test_clean_stale_subvolume_snapshot_metadata(self):
5531 """
5532 Validate cleaning of stale subvolume snapshot metadata.
5533 """
5534 subvolname = self._generate_random_subvolume_name()
5535 group = self._generate_random_group_name()
5536 snapshot = self._generate_random_snapshot_name()
5537
5538 # create group.
5539 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5540
5541 # create subvolume in group.
5542 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5543
5544 # snapshot subvolume
5545 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5546
5547 # set metadata for snapshot.
5548 key = "key"
5549 value = "value"
5550 try:
5551 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5552 except CommandFailedError:
5553 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5554
5555 # save the subvolume config file.
5556 meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
5557 tmp_meta_path = os.path.join(".", "volumes", group, subvolname, ".meta.stale_snap_section")
5558 self.mount_a.run_shell(['sudo', 'cp', '-p', meta_path, tmp_meta_path], omit_sudo=False)
5559
5560 # Delete snapshot, this would remove user snap metadata
5561 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5562
5563 # Copy back saved subvolume config file. This would have stale snapshot metadata
5564 self.mount_a.run_shell(['sudo', 'cp', '-p', tmp_meta_path, meta_path], omit_sudo=False)
5565
5566 # Verify that it has stale snapshot metadata
5567 section_name = "SNAP_METADATA_" + snapshot
5568 try:
5569 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5570 except CommandFailedError:
5571 self.fail("Expected grep cmd to succeed because stale snapshot metadata exist")
5572
5573 # Do any subvolume operation to clean the stale snapshot metadata
5574 _ = json.loads(self._get_subvolume_info(self.volname, subvolname, group))
5575
5576 # Verify that the stale snapshot metadata is cleaned
5577 try:
5578 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5579 except CommandFailedError as e:
5580 self.assertNotEqual(e.exitstatus, 0)
5581 else:
5582 self.fail("Expected non-zero exist status because stale snapshot metadata should not exist")
5583
5584 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5585 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5586
5587 # verify trash dir is clean.
5588 self._wait_for_trash_empty()
5589 # Clean tmp config file
5590 self.mount_a.run_shell(['sudo', 'rm', '-f', tmp_meta_path], omit_sudo=False)
5591
5592
5593 class TestSubvolumeSnapshotClones(TestVolumesHelper):
5594 """ Tests for FS subvolume snapshot clone operations."""
5595 def test_clone_subvolume_info(self):
5596 # tests the 'fs subvolume info' command for a clone
5597 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
5598 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
5599 "type", "uid"]
5600
5601 subvolume = self._generate_random_subvolume_name()
5602 snapshot = self._generate_random_snapshot_name()
5603 clone = self._generate_random_clone_name()
5604
5605 # create subvolume
5606 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5607
5608 # do some IO
5609 self._do_subvolume_io(subvolume, number_of_files=1)
5610
5611 # snapshot subvolume
5612 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5613
5614 # schedule a clone
5615 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5616
5617 # check clone status
5618 self._wait_for_clone_to_complete(clone)
5619
5620 # remove snapshot
5621 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5622
5623 subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
5624 if len(subvol_info) == 0:
5625 raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
5626 for md in subvol_md:
5627 if md not in subvol_info.keys():
5628 raise RuntimeError("%s not present in the metadata of subvolume" % md)
5629 if subvol_info["type"] != "clone":
5630 raise RuntimeError("type should be set to clone")
5631
5632 # remove subvolumes
5633 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5634 self._fs_cmd("subvolume", "rm", self.volname, clone)
5635
5636 # verify trash dir is clean
5637 self._wait_for_trash_empty()
5638
5639 def test_subvolume_snapshot_info_without_snapshot_clone(self):
5640 """
5641 Verify subvolume snapshot info output without cloning snapshot.
5642 If no clone is performed then path /volumes/_index/clone/{track_id}
5643 will not exist.
5644 """
5645 subvolume = self._generate_random_subvolume_name()
5646 snapshot = self._generate_random_snapshot_name()
5647
5648 # create subvolume.
5649 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5650
5651 # snapshot subvolume
5652 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5653
5654 # list snapshot info
5655 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5656
5657 # verify snapshot info
5658 self.assertEqual(result['has_pending_clones'], "no")
5659 self.assertFalse('orphan_clones_count' in result)
5660 self.assertFalse('pending_clones' in result)
5661
5662 # remove snapshot, subvolume, clone
5663 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5664 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5665
5666 # verify trash dir is clean
5667 self._wait_for_trash_empty()
5668
5669 def test_subvolume_snapshot_info_if_no_clone_pending(self):
5670 """
5671 Verify subvolume snapshot info output if no clone is in pending state.
5672 """
5673 subvolume = self._generate_random_subvolume_name()
5674 snapshot = self._generate_random_snapshot_name()
5675 clone_list = [f'clone_{i}' for i in range(3)]
5676
5677 # create subvolume.
5678 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5679
5680 # snapshot subvolume
5681 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5682
5683 # schedule a clones
5684 for clone in clone_list:
5685 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5686
5687 # check clones status
5688 for clone in clone_list:
5689 self._wait_for_clone_to_complete(clone)
5690
5691 # list snapshot info
5692 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5693
5694 # verify snapshot info
5695 self.assertEqual(result['has_pending_clones'], "no")
5696 self.assertFalse('orphan_clones_count' in result)
5697 self.assertFalse('pending_clones' in result)
5698
5699 # remove snapshot, subvolume, clone
5700 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5701 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5702 for clone in clone_list:
5703 self._fs_cmd("subvolume", "rm", self.volname, clone)
5704
5705 # verify trash dir is clean
5706 self._wait_for_trash_empty()
5707
5708 def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self):
5709 """
5710 Verify subvolume snapshot info output if clones are in pending state.
5711 Clones are not specified for particular target_group. Hence target_group
5712 should not be in the output as we don't show _nogroup (default group)
5713 """
5714 subvolume = self._generate_random_subvolume_name()
5715 snapshot = self._generate_random_snapshot_name()
5716 clone_list = [f'clone_{i}' for i in range(3)]
5717
5718 # create subvolume.
5719 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5720
5721 # snapshot subvolume
5722 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5723
5724 # insert delay at the beginning of snapshot clone
5725 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5726
5727 # schedule a clones
5728 for clone in clone_list:
5729 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5730
5731 # list snapshot info
5732 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5733
5734 # verify snapshot info
5735 expected_clone_list = []
5736 for clone in clone_list:
5737 expected_clone_list.append({"name": clone})
5738 self.assertEqual(result['has_pending_clones'], "yes")
5739 self.assertFalse('orphan_clones_count' in result)
5740 self.assertListEqual(result['pending_clones'], expected_clone_list)
5741 self.assertEqual(len(result['pending_clones']), 3)
5742
5743 # check clones status
5744 for clone in clone_list:
5745 self._wait_for_clone_to_complete(clone)
5746
5747 # remove snapshot, subvolume, clone
5748 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5749 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5750 for clone in clone_list:
5751 self._fs_cmd("subvolume", "rm", self.volname, clone)
5752
5753 # verify trash dir is clean
5754 self._wait_for_trash_empty()
5755
5756 def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self):
5757 """
5758 Verify subvolume snapshot info output if clones are in pending state.
5759 Clones are not specified for target_group.
5760 """
5761 subvolume = self._generate_random_subvolume_name()
5762 snapshot = self._generate_random_snapshot_name()
5763 clone = self._generate_random_clone_name()
5764 group = self._generate_random_group_name()
5765 target_group = self._generate_random_group_name()
5766
5767 # create groups
5768 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5769 self._fs_cmd("subvolumegroup", "create", self.volname, target_group)
5770
5771 # create subvolume
5772 self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")
5773
5774 # snapshot subvolume
5775 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
5776
5777 # insert delay at the beginning of snapshot clone
5778 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5779
5780 # schedule a clone
5781 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
5782 "--group_name", group, "--target_group_name", target_group)
5783
5784 # list snapshot info
5785 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot, "--group_name", group))
5786
5787 # verify snapshot info
5788 expected_clone_list = [{"name": clone, "target_group": target_group}]
5789 self.assertEqual(result['has_pending_clones'], "yes")
5790 self.assertFalse('orphan_clones_count' in result)
5791 self.assertListEqual(result['pending_clones'], expected_clone_list)
5792 self.assertEqual(len(result['pending_clones']), 1)
5793
5794 # check clone status
5795 self._wait_for_clone_to_complete(clone, clone_group=target_group)
5796
5797 # remove snapshot
5798 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
5799
5800 # remove subvolumes
5801 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
5802 self._fs_cmd("subvolume", "rm", self.volname, clone, target_group)
5803
5804 # remove groups
5805 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5806 self._fs_cmd("subvolumegroup", "rm", self.volname, target_group)
5807
5808 # verify trash dir is clean
5809 self._wait_for_trash_empty()
5810
    def test_subvolume_snapshot_info_if_orphan_clone(self):
        """
        Verify subvolume snapshot info output if orphan clones exist.
        Orphan clones should not list under pending clones.
        orphan_clones_count should display the correct count of orphan clones.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone_list = [f'clone_{i}' for i in range(3)]

        # create subvolume.
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # insert delay at the beginning of snapshot clone, long enough (15s)
        # that all three clones are still pending during the checks below
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 15)

        # schedule a clones
        for clone in clone_list:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # remove track file for third clone to make it orphan
        meta_path = os.path.join(".", "volumes", "_nogroup", subvolume, ".meta")
        # grep the "clone snaps" section plus the next 3 lines; the third
        # clone's track id is taken from the 4th matched line, which is
        # expected to look like "<track_id> = <...>" — assumes the section
        # layout written by the mgr/volumes plugin, TODO confirm if it changes
        pending_clones_result = self.mount_a.run_shell(['sudo', 'grep', 'clone snaps', '-A3', meta_path], omit_sudo=False, stdout=StringIO(), stderr=StringIO())
        third_clone_track_id = pending_clones_result.stdout.getvalue().splitlines()[3].split(" = ")[0]
        third_clone_track_path = os.path.join(".", "volumes", "_index", "clone", third_clone_track_id)
        self.mount_a.run_shell(f"sudo rm -f {third_clone_track_path}", omit_sudo=False)

        # list snapshot info
        result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info: only the first two clones remain pending, the
        # de-tracked third one is counted as an orphan instead
        expected_clone_list = []
        for i in range(len(clone_list)-1):
            expected_clone_list.append({"name": clone_list[i]})
        self.assertEqual(result['has_pending_clones'], "yes")
        self.assertEqual(result['orphan_clones_count'], 1)
        self.assertListEqual(result['pending_clones'], expected_clone_list)
        self.assertEqual(len(result['pending_clones']), 2)

        # check clones status (only the two non-orphan clones can complete)
        for i in range(len(clone_list)-1):
            self._wait_for_clone_to_complete(clone_list[i])

        # list snapshot info after cloning completion
        res = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))

        # verify snapshot info (has_pending_clones should be no)
        # NOTE(review): unlike sibling tests, there is no teardown or trash
        # check here — presumably deliberate since the orphan clone leaves
        # state behind; verify against the test harness if this matters
        self.assertEqual(res['has_pending_clones'], "no")
5862
5863 def test_non_clone_status(self):
5864 subvolume = self._generate_random_subvolume_name()
5865
5866 # create subvolume
5867 self._fs_cmd("subvolume", "create", self.volname, subvolume)
5868
5869 try:
5870 self._fs_cmd("clone", "status", self.volname, subvolume)
5871 except CommandFailedError as ce:
5872 if ce.exitstatus != errno.ENOTSUP:
5873 raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
5874 else:
5875 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
5876
5877 # remove subvolume
5878 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5879
5880 # verify trash dir is clean
5881 self._wait_for_trash_empty()
5882
5883 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
5884 subvolume = self._generate_random_subvolume_name()
5885 snapshot = self._generate_random_snapshot_name()
5886 clone = self._generate_random_clone_name()
5887 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
5888
5889 # create subvolume, in an isolated namespace with a specified size
5890 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")
5891
5892 # do some IO
5893 self._do_subvolume_io(subvolume, number_of_files=8)
5894
5895 # snapshot subvolume
5896 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5897
5898 # create a pool different from current subvolume pool
5899 subvol_path = self._get_subvolume_path(self.volname, subvolume)
5900 default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
5901 new_pool = "new_pool"
5902 self.assertNotEqual(default_pool, new_pool)
5903 self.fs.add_data_pool(new_pool)
5904
5905 # update source subvolume pool
5906 self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")
5907
5908 # schedule a clone, with NO --pool specification
5909 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5910
5911 # check clone status
5912 self._wait_for_clone_to_complete(clone)
5913
5914 # verify clone
5915 self._verify_clone(subvolume, snapshot, clone)
5916
5917 # remove snapshot
5918 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5919
5920 # remove subvolumes
5921 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5922 self._fs_cmd("subvolume", "rm", self.volname, clone)
5923
5924 # verify trash dir is clean
5925 self._wait_for_trash_empty()
5926
5927 def test_subvolume_clone_inherit_quota_attrs(self):
5928 subvolume = self._generate_random_subvolume_name()
5929 snapshot = self._generate_random_snapshot_name()
5930 clone = self._generate_random_clone_name()
5931 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
5932
5933 # create subvolume with a specified size
5934 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))
5935
5936 # do some IO
5937 self._do_subvolume_io(subvolume, number_of_files=8)
5938
5939 # get subvolume path
5940 subvolpath = self._get_subvolume_path(self.volname, subvolume)
5941
5942 # set quota on number of files
5943 self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)
5944
5945 # snapshot subvolume
5946 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5947
5948 # schedule a clone
5949 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5950
5951 # check clone status
5952 self._wait_for_clone_to_complete(clone)
5953
5954 # verify clone
5955 self._verify_clone(subvolume, snapshot, clone)
5956
5957 # get subvolume path
5958 clonepath = self._get_subvolume_path(self.volname, clone)
5959
5960 # verify quota max_files is inherited from source snapshot
5961 subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
5962 clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
5963 self.assertEqual(subvol_quota, clone_quota)
5964
5965 # remove snapshot
5966 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5967
5968 # remove subvolumes
5969 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5970 self._fs_cmd("subvolume", "rm", self.volname, clone)
5971
5972 # verify trash dir is clean
5973 self._wait_for_trash_empty()
5974
5975 def test_subvolume_clone_in_progress_getpath(self):
5976 subvolume = self._generate_random_subvolume_name()
5977 snapshot = self._generate_random_snapshot_name()
5978 clone = self._generate_random_clone_name()
5979
5980 # create subvolume
5981 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5982
5983 # do some IO
5984 self._do_subvolume_io(subvolume, number_of_files=64)
5985
5986 # snapshot subvolume
5987 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5988
5989 # Insert delay at the beginning of snapshot clone
5990 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5991
5992 # schedule a clone
5993 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5994
5995 # clone should not be accessible right now
5996 try:
5997 self._get_subvolume_path(self.volname, clone)
5998 except CommandFailedError as ce:
5999 if ce.exitstatus != errno.EAGAIN:
6000 raise RuntimeError("invalid error code when fetching path of an pending clone")
6001 else:
6002 raise RuntimeError("expected fetching path of an pending clone to fail")
6003
6004 # check clone status
6005 self._wait_for_clone_to_complete(clone)
6006
6007 # clone should be accessible now
6008 subvolpath = self._get_subvolume_path(self.volname, clone)
6009 self.assertNotEqual(subvolpath, None)
6010
6011 # verify clone
6012 self._verify_clone(subvolume, snapshot, clone)
6013
6014 # remove snapshot
6015 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6016
6017 # remove subvolumes
6018 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6019 self._fs_cmd("subvolume", "rm", self.volname, clone)
6020
6021 # verify trash dir is clean
6022 self._wait_for_trash_empty()
6023
6024 def test_subvolume_clone_in_progress_snapshot_rm(self):
6025 subvolume = self._generate_random_subvolume_name()
6026 snapshot = self._generate_random_snapshot_name()
6027 clone = self._generate_random_clone_name()
6028
6029 # create subvolume
6030 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6031
6032 # do some IO
6033 self._do_subvolume_io(subvolume, number_of_files=64)
6034
6035 # snapshot subvolume
6036 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6037
6038 # Insert delay at the beginning of snapshot clone
6039 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6040
6041 # schedule a clone
6042 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6043
6044 # snapshot should not be deletable now
6045 try:
6046 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6047 except CommandFailedError as ce:
6048 self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
6049 else:
6050 self.fail("expected removing source snapshot of a clone to fail")
6051
6052 # check clone status
6053 self._wait_for_clone_to_complete(clone)
6054
6055 # clone should be accessible now
6056 subvolpath = self._get_subvolume_path(self.volname, clone)
6057 self.assertNotEqual(subvolpath, None)
6058
6059 # verify clone
6060 self._verify_clone(subvolume, snapshot, clone)
6061
6062 # remove snapshot
6063 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6064
6065 # remove subvolumes
6066 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6067 self._fs_cmd("subvolume", "rm", self.volname, clone)
6068
6069 # verify trash dir is clean
6070 self._wait_for_trash_empty()
6071
6072 def test_subvolume_clone_in_progress_source(self):
6073 subvolume = self._generate_random_subvolume_name()
6074 snapshot = self._generate_random_snapshot_name()
6075 clone = self._generate_random_clone_name()
6076
6077 # create subvolume
6078 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6079
6080 # do some IO
6081 self._do_subvolume_io(subvolume, number_of_files=64)
6082
6083 # snapshot subvolume
6084 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6085
6086 # Insert delay at the beginning of snapshot clone
6087 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6088
6089 # schedule a clone
6090 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6091
6092 # verify clone source
6093 result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
6094 source = result['status']['source']
6095 self.assertEqual(source['volume'], self.volname)
6096 self.assertEqual(source['subvolume'], subvolume)
6097 self.assertEqual(source.get('group', None), None)
6098 self.assertEqual(source['snapshot'], snapshot)
6099
6100 # check clone status
6101 self._wait_for_clone_to_complete(clone)
6102
6103 # clone should be accessible now
6104 subvolpath = self._get_subvolume_path(self.volname, clone)
6105 self.assertNotEqual(subvolpath, None)
6106
6107 # verify clone
6108 self._verify_clone(subvolume, snapshot, clone)
6109
6110 # remove snapshot
6111 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6112
6113 # remove subvolumes
6114 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6115 self._fs_cmd("subvolume", "rm", self.volname, clone)
6116
6117 # verify trash dir is clean
6118 self._wait_for_trash_empty()
6119
6120 def test_subvolume_clone_retain_snapshot_with_snapshots(self):
6121 """
6122 retain snapshots of a cloned subvolume and check disallowed operations
6123 """
6124 subvolume = self._generate_random_subvolume_name()
6125 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
6126 clone = self._generate_random_clone_name()
6127
6128 # create subvolume
6129 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6130
6131 # store path for clone verification
6132 subvol1_path = self._get_subvolume_path(self.volname, subvolume)
6133
6134 # do some IO
6135 self._do_subvolume_io(subvolume, number_of_files=16)
6136
6137 # snapshot subvolume
6138 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
6139
6140 # remove with snapshot retention
6141 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6142
6143 # clone retained subvolume snapshot
6144 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)
6145
6146 # check clone status
6147 self._wait_for_clone_to_complete(clone)
6148
6149 # verify clone
6150 self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)
6151
6152 # create a snapshot on the clone
6153 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)
6154
6155 # retain a clone
6156 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
6157
6158 # list snapshots
6159 clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
6160 self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
6161 " created subvolume snapshots")
6162 snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
6163 for snap in [snapshot2]:
6164 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
6165
6166 ## check disallowed operations on retained clone
6167 # clone-status
6168 try:
6169 self._fs_cmd("clone", "status", self.volname, clone)
6170 except CommandFailedError as ce:
6171 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
6172 else:
6173 self.fail("expected clone status of clone with retained snapshots to fail")
6174
6175 # clone-cancel
6176 try:
6177 self._fs_cmd("clone", "cancel", self.volname, clone)
6178 except CommandFailedError as ce:
6179 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
6180 else:
6181 self.fail("expected clone cancel of clone with retained snapshots to fail")
6182
6183 # remove snapshots (removes subvolumes as all are in retained state)
6184 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
6185 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)
6186
6187 # verify list subvolumes returns an empty list
6188 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6189 self.assertEqual(len(subvolumels), 0)
6190
6191 # verify trash dir is clean
6192 self._wait_for_trash_empty()
6193
6194 def test_subvolume_retain_snapshot_clone(self):
6195 """
6196 clone a snapshot from a snapshot retained subvolume
6197 """
6198 subvolume = self._generate_random_subvolume_name()
6199 snapshot = self._generate_random_snapshot_name()
6200 clone = self._generate_random_clone_name()
6201
6202 # create subvolume
6203 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6204
6205 # store path for clone verification
6206 subvol_path = self._get_subvolume_path(self.volname, subvolume)
6207
6208 # do some IO
6209 self._do_subvolume_io(subvolume, number_of_files=16)
6210
6211 # snapshot subvolume
6212 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6213
6214 # remove with snapshot retention
6215 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6216
6217 # clone retained subvolume snapshot
6218 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6219
6220 # check clone status
6221 self._wait_for_clone_to_complete(clone)
6222
6223 # verify clone
6224 self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)
6225
6226 # remove snapshots (removes retained volume)
6227 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6228
6229 # remove subvolume
6230 self._fs_cmd("subvolume", "rm", self.volname, clone)
6231
6232 # verify list subvolumes returns an empty list
6233 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6234 self.assertEqual(len(subvolumels), 0)
6235
6236 # verify trash dir is clean
6237 self._wait_for_trash_empty()
6238
6239 def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
6240 """
6241 clone a subvolume from recreated subvolume's latest snapshot
6242 """
6243 subvolume = self._generate_random_subvolume_name()
6244 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
6245 clone = self._generate_random_clone_name(1)
6246
6247 # create subvolume
6248 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6249
6250 # do some IO
6251 self._do_subvolume_io(subvolume, number_of_files=16)
6252
6253 # snapshot subvolume
6254 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
6255
6256 # remove with snapshot retention
6257 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6258
6259 # recreate subvolume
6260 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6261
6262 # get and store path for clone verification
6263 subvol2_path = self._get_subvolume_path(self.volname, subvolume)
6264
6265 # do some IO
6266 self._do_subvolume_io(subvolume, number_of_files=16)
6267
6268 # snapshot newer subvolume
6269 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
6270
6271 # remove with snapshot retention
6272 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6273
6274 # clone retained subvolume's newer snapshot
6275 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)
6276
6277 # check clone status
6278 self._wait_for_clone_to_complete(clone)
6279
6280 # verify clone
6281 self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)
6282
6283 # remove snapshot
6284 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
6285 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
6286
6287 # remove subvolume
6288 self._fs_cmd("subvolume", "rm", self.volname, clone)
6289
6290 # verify list subvolumes returns an empty list
6291 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6292 self.assertEqual(len(subvolumels), 0)
6293
6294 # verify trash dir is clean
6295 self._wait_for_trash_empty()
6296
6297 def test_subvolume_retain_snapshot_recreate(self):
6298 """
6299 recreate a subvolume from one of its retained snapshots
6300 """
6301 subvolume = self._generate_random_subvolume_name()
6302 snapshot = self._generate_random_snapshot_name()
6303
6304 # create subvolume
6305 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6306
6307 # store path for clone verification
6308 subvol_path = self._get_subvolume_path(self.volname, subvolume)
6309
6310 # do some IO
6311 self._do_subvolume_io(subvolume, number_of_files=16)
6312
6313 # snapshot subvolume
6314 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6315
6316 # remove with snapshot retention
6317 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6318
6319 # recreate retained subvolume using its own snapshot to clone
6320 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
6321
6322 # check clone status
6323 self._wait_for_clone_to_complete(subvolume)
6324
6325 # verify clone
6326 self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)
6327
6328 # remove snapshot
6329 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6330
6331 # remove subvolume
6332 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6333
6334 # verify list subvolumes returns an empty list
6335 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6336 self.assertEqual(len(subvolumels), 0)
6337
6338 # verify trash dir is clean
6339 self._wait_for_trash_empty()
6340
6341 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
6342 """
6343 ensure retained clone recreate fails if its trash is not yet purged
6344 """
6345 subvolume = self._generate_random_subvolume_name()
6346 snapshot = self._generate_random_snapshot_name()
6347 clone = self._generate_random_clone_name()
6348
6349 # create subvolume
6350 self._fs_cmd("subvolume", "create", self.volname, subvolume)
6351
6352 # snapshot subvolume
6353 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6354
6355 # clone subvolume snapshot
6356 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6357
6358 # check clone status
6359 self._wait_for_clone_to_complete(clone)
6360
6361 # snapshot clone
6362 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
6363
6364 # remove clone with snapshot retention
6365 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
6366
6367 # fake a trash entry
6368 self._update_fake_trash(clone)
6369
6370 # clone subvolume snapshot (recreate)
6371 try:
6372 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6373 except CommandFailedError as ce:
6374 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
6375 else:
6376 self.fail("expected recreate of clone with purge pending to fail")
6377
6378 # clear fake trash entry
6379 self._update_fake_trash(clone, create=False)
6380
6381 # recreate subvolume
6382 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6383
6384 # check clone status
6385 self._wait_for_clone_to_complete(clone)
6386
6387 # remove snapshot
6388 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6389 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
6390
6391 # remove subvolume
6392 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6393 self._fs_cmd("subvolume", "rm", self.volname, clone)
6394
6395 # verify trash dir is clean
6396 self._wait_for_trash_empty()
6397
6398 def test_subvolume_snapshot_attr_clone(self):
6399 subvolume = self._generate_random_subvolume_name()
6400 snapshot = self._generate_random_snapshot_name()
6401 clone = self._generate_random_clone_name()
6402
6403 # create subvolume
6404 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6405
6406 # do some IO
6407 self._do_subvolume_io_mixed(subvolume)
6408
6409 # snapshot subvolume
6410 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6411
6412 # schedule a clone
6413 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6414
6415 # check clone status
6416 self._wait_for_clone_to_complete(clone)
6417
6418 # verify clone
6419 self._verify_clone(subvolume, snapshot, clone)
6420
6421 # remove snapshot
6422 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6423
6424 # remove subvolumes
6425 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6426 self._fs_cmd("subvolume", "rm", self.volname, clone)
6427
6428 # verify trash dir is clean
6429 self._wait_for_trash_empty()
6430
6431 def test_clone_failure_status_pending_in_progress_complete(self):
6432 """
6433 ensure failure status is not shown when clone is not in failed/cancelled state
6434 """
6435 subvolume = self._generate_random_subvolume_name()
6436 snapshot = self._generate_random_snapshot_name()
6437 clone1 = self._generate_random_clone_name()
6438
6439 # create subvolume
6440 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6441
6442 # do some IO
6443 self._do_subvolume_io(subvolume, number_of_files=200)
6444
6445 # snapshot subvolume
6446 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6447
6448 # Insert delay at the beginning of snapshot clone
6449 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6450
6451 # schedule a clone1
6452 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6453
6454 # pending clone shouldn't show failure status
6455 clone1_result = self._get_clone_status(clone1)
6456 try:
6457 clone1_result["status"]["failure"]["errno"]
6458 except KeyError as e:
6459 self.assertEqual(str(e), "'failure'")
6460 else:
6461 self.fail("clone status shouldn't show failure for pending clone")
6462
6463 # check clone1 to be in-progress
6464 self._wait_for_clone_to_be_in_progress(clone1)
6465
6466 # in-progress clone1 shouldn't show failure status
6467 clone1_result = self._get_clone_status(clone1)
6468 try:
6469 clone1_result["status"]["failure"]["errno"]
6470 except KeyError as e:
6471 self.assertEqual(str(e), "'failure'")
6472 else:
6473 self.fail("clone status shouldn't show failure for in-progress clone")
6474
6475 # wait for clone1 to complete
6476 self._wait_for_clone_to_complete(clone1)
6477
6478 # complete clone1 shouldn't show failure status
6479 clone1_result = self._get_clone_status(clone1)
6480 try:
6481 clone1_result["status"]["failure"]["errno"]
6482 except KeyError as e:
6483 self.assertEqual(str(e), "'failure'")
6484 else:
6485 self.fail("clone status shouldn't show failure for complete clone")
6486
6487 # remove snapshot
6488 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6489
6490 # remove subvolumes
6491 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6492 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6493
6494 # verify trash dir is clean
6495 self._wait_for_trash_empty()
6496
6497 def test_clone_failure_status_failed(self):
6498 """
6499 ensure failure status is shown when clone is in failed state and validate the reason
6500 """
6501 subvolume = self._generate_random_subvolume_name()
6502 snapshot = self._generate_random_snapshot_name()
6503 clone1 = self._generate_random_clone_name()
6504
6505 # create subvolume
6506 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6507
6508 # do some IO
6509 self._do_subvolume_io(subvolume, number_of_files=200)
6510
6511 # snapshot subvolume
6512 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6513
6514 # Insert delay at the beginning of snapshot clone
6515 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6516
6517 # schedule a clone1
6518 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6519
6520 # remove snapshot from backend to force the clone failure.
6521 snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
6522 self.mount_a.run_shell(['sudo', 'rmdir', snappath], omit_sudo=False)
6523
6524 # wait for clone1 to fail.
6525 self._wait_for_clone_to_fail(clone1)
6526
6527 # check clone1 status
6528 clone1_result = self._get_clone_status(clone1)
6529 self.assertEqual(clone1_result["status"]["state"], "failed")
6530 self.assertEqual(clone1_result["status"]["failure"]["errno"], "2")
6531 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot))
6532
6533 # clone removal should succeed after failure, remove clone1
6534 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6535
6536 # remove subvolumes
6537 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6538
6539 # verify trash dir is clean
6540 self._wait_for_trash_empty()
6541
6542 def test_clone_failure_status_pending_cancelled(self):
6543 """
6544 ensure failure status is shown when clone is cancelled during pending state and validate the reason
6545 """
6546 subvolume = self._generate_random_subvolume_name()
6547 snapshot = self._generate_random_snapshot_name()
6548 clone1 = self._generate_random_clone_name()
6549
6550 # create subvolume
6551 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6552
6553 # do some IO
6554 self._do_subvolume_io(subvolume, number_of_files=200)
6555
6556 # snapshot subvolume
6557 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6558
6559 # Insert delay at the beginning of snapshot clone
6560 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6561
6562 # schedule a clone1
6563 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6564
6565 # cancel pending clone1
6566 self._fs_cmd("clone", "cancel", self.volname, clone1)
6567
6568 # check clone1 status
6569 clone1_result = self._get_clone_status(clone1)
6570 self.assertEqual(clone1_result["status"]["state"], "canceled")
6571 self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
6572 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")
6573
6574 # clone removal should succeed with force after cancelled, remove clone1
6575 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6576
6577 # remove snapshot
6578 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6579
6580 # remove subvolumes
6581 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6582
6583 # verify trash dir is clean
6584 self._wait_for_trash_empty()
6585
6586 def test_clone_failure_status_in_progress_cancelled(self):
6587 """
6588 ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
6589 """
6590 subvolume = self._generate_random_subvolume_name()
6591 snapshot = self._generate_random_snapshot_name()
6592 clone1 = self._generate_random_clone_name()
6593
6594 # create subvolume
6595 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6596
6597 # do some IO
6598 self._do_subvolume_io(subvolume, number_of_files=200)
6599
6600 # snapshot subvolume
6601 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6602
6603 # Insert delay at the beginning of snapshot clone
6604 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6605
6606 # schedule a clone1
6607 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6608
6609 # wait for clone1 to be in-progress
6610 self._wait_for_clone_to_be_in_progress(clone1)
6611
6612 # cancel in-progess clone1
6613 self._fs_cmd("clone", "cancel", self.volname, clone1)
6614
6615 # check clone1 status
6616 clone1_result = self._get_clone_status(clone1)
6617 self.assertEqual(clone1_result["status"]["state"], "canceled")
6618 self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
6619 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")
6620
6621 # clone removal should succeed with force after cancelled, remove clone1
6622 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6623
6624 # remove snapshot
6625 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6626
6627 # remove subvolumes
6628 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6629
6630 # verify trash dir is clean
6631 self._wait_for_trash_empty()
6632
6633 def test_subvolume_snapshot_clone(self):
6634 subvolume = self._generate_random_subvolume_name()
6635 snapshot = self._generate_random_snapshot_name()
6636 clone = self._generate_random_clone_name()
6637
6638 # create subvolume
6639 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6640
6641 # do some IO
6642 self._do_subvolume_io(subvolume, number_of_files=64)
6643
6644 # snapshot subvolume
6645 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6646
6647 # schedule a clone
6648 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6649
6650 # check clone status
6651 self._wait_for_clone_to_complete(clone)
6652
6653 # verify clone
6654 self._verify_clone(subvolume, snapshot, clone)
6655
6656 # remove snapshot
6657 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6658
6659 # remove subvolumes
6660 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6661 self._fs_cmd("subvolume", "rm", self.volname, clone)
6662
6663 # verify trash dir is clean
6664 self._wait_for_trash_empty()
6665
6666 def test_subvolume_snapshot_clone_quota_exceeded(self):
6667 subvolume = self._generate_random_subvolume_name()
6668 snapshot = self._generate_random_snapshot_name()
6669 clone = self._generate_random_clone_name()
6670
6671 # create subvolume with 20MB quota
6672 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
6673 self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize))
6674
6675 # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
6676 try:
6677 self._do_subvolume_io(subvolume, number_of_files=50)
6678 except CommandFailedError:
6679 # ignore quota enforcement error.
6680 pass
6681
6682 # snapshot subvolume
6683 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6684
6685 # schedule a clone
6686 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6687
6688 # check clone status
6689 self._wait_for_clone_to_complete(clone)
6690
6691 # verify clone
6692 self._verify_clone(subvolume, snapshot, clone)
6693
6694 # remove snapshot
6695 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6696
6697 # remove subvolumes
6698 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6699 self._fs_cmd("subvolume", "rm", self.volname, clone)
6700
6701 # verify trash dir is clean
6702 self._wait_for_trash_empty()
6703
6704 def test_subvolume_snapshot_in_complete_clone_rm(self):
6705 """
6706 Validates the removal of clone when it is not in 'complete|cancelled|failed' state.
6707 The forceful removl of subvolume clone succeeds only if it's in any of the
6708 'complete|cancelled|failed' states. It fails with EAGAIN in any other states.
6709 """
6710
6711 subvolume = self._generate_random_subvolume_name()
6712 snapshot = self._generate_random_snapshot_name()
6713 clone = self._generate_random_clone_name()
6714
6715 # create subvolume
6716 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6717
6718 # do some IO
6719 self._do_subvolume_io(subvolume, number_of_files=64)
6720
6721 # snapshot subvolume
6722 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6723
6724 # Insert delay at the beginning of snapshot clone
6725 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6726
6727 # schedule a clone
6728 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6729
6730 # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
6731 try:
6732 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6733 except CommandFailedError as ce:
6734 if ce.exitstatus != errno.EAGAIN:
6735 raise RuntimeError("invalid error code when trying to remove failed clone")
6736 else:
6737 raise RuntimeError("expected error when removing a failed clone")
6738
6739 # cancel on-going clone
6740 self._fs_cmd("clone", "cancel", self.volname, clone)
6741
6742 # verify canceled state
6743 self._check_clone_canceled(clone)
6744
6745 # clone removal should succeed after cancel
6746 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6747
6748 # remove snapshot
6749 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6750
6751 # remove subvolumes
6752 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6753
6754 # verify trash dir is clean
6755 self._wait_for_trash_empty()
6756
6757 def test_subvolume_snapshot_clone_retain_suid_guid(self):
6758 subvolume = self._generate_random_subvolume_name()
6759 snapshot = self._generate_random_snapshot_name()
6760 clone = self._generate_random_clone_name()
6761
6762 # create subvolume
6763 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6764
6765 # Create a file with suid, guid bits set along with executable bit.
6766 args = ["subvolume", "getpath", self.volname, subvolume]
6767 args = tuple(args)
6768 subvolpath = self._fs_cmd(*args)
6769 self.assertNotEqual(subvolpath, None)
6770 subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline
6771
6772 file_path = subvolpath
6773 file_path = os.path.join(subvolpath, "test_suid_file")
6774 self.mount_a.run_shell(["touch", file_path])
6775 self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])
6776
6777 # snapshot subvolume
6778 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6779
6780 # schedule a clone
6781 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6782
6783 # check clone status
6784 self._wait_for_clone_to_complete(clone)
6785
6786 # verify clone
6787 self._verify_clone(subvolume, snapshot, clone)
6788
6789 # remove snapshot
6790 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6791
6792 # remove subvolumes
6793 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6794 self._fs_cmd("subvolume", "rm", self.volname, clone)
6795
6796 # verify trash dir is clean
6797 self._wait_for_trash_empty()
6798
6799 def test_subvolume_snapshot_clone_and_reclone(self):
6800 subvolume = self._generate_random_subvolume_name()
6801 snapshot = self._generate_random_snapshot_name()
6802 clone1, clone2 = self._generate_random_clone_name(2)
6803
6804 # create subvolume
6805 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6806
6807 # do some IO
6808 self._do_subvolume_io(subvolume, number_of_files=32)
6809
6810 # snapshot subvolume
6811 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6812
6813 # schedule a clone
6814 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6815
6816 # check clone status
6817 self._wait_for_clone_to_complete(clone1)
6818
6819 # verify clone
6820 self._verify_clone(subvolume, snapshot, clone1)
6821
6822 # remove snapshot
6823 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6824
6825 # now the clone is just like a normal subvolume -- snapshot the clone and fork
6826 # another clone. before that do some IO so it's can be differentiated.
6827 self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)
6828
6829 # snapshot clone -- use same snap name
6830 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)
6831
6832 # schedule a clone
6833 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)
6834
6835 # check clone status
6836 self._wait_for_clone_to_complete(clone2)
6837
6838 # verify clone
6839 self._verify_clone(clone1, snapshot, clone2)
6840
6841 # remove snapshot
6842 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)
6843
6844 # remove subvolumes
6845 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6846 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6847 self._fs_cmd("subvolume", "rm", self.volname, clone2)
6848
6849 # verify trash dir is clean
6850 self._wait_for_trash_empty()
6851
6852 def test_subvolume_snapshot_clone_cancel_in_progress(self):
6853 subvolume = self._generate_random_subvolume_name()
6854 snapshot = self._generate_random_snapshot_name()
6855 clone = self._generate_random_clone_name()
6856
6857 # create subvolume
6858 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6859
6860 # do some IO
6861 self._do_subvolume_io(subvolume, number_of_files=128)
6862
6863 # snapshot subvolume
6864 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6865
6866 # Insert delay at the beginning of snapshot clone
6867 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6868
6869 # schedule a clone
6870 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6871
6872 # cancel on-going clone
6873 self._fs_cmd("clone", "cancel", self.volname, clone)
6874
6875 # verify canceled state
6876 self._check_clone_canceled(clone)
6877
6878 # remove snapshot
6879 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6880
6881 # remove subvolumes
6882 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6883 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6884
6885 # verify trash dir is clean
6886 self._wait_for_trash_empty()
6887
    def test_subvolume_snapshot_clone_cancel_pending(self):
        """
        this test is a bit more involved compared to canceling an in-progress clone.
        we'd need to ensure that a to-be canceled clone has still not been picked up
        by cloner threads. exploit the fact that clones are picked up in an FCFS
        fashion and there are four (4) cloner threads by default. When the number of
        cloner threads increase, this test _may_ start tripping -- so, the number of
        clone operations would need to be jacked up.
        """
        # default number of clone threads
        NR_THREADS = 4
        # good enough for 4 threads
        NR_CLONES = 5
        # yeh, 1gig -- we need the clone to run for sometime
        FILE_SIZE_MB = 1024

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clones = self._generate_random_clone_name(NR_CLONES)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO (4 x 1GB files -- keeps the cloner threads busy long enough)
        self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule clones
        for clone in clones:
            self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # FCFS: the first NR_THREADS clones get picked up by the cloner threads;
        # any remaining ones must still be sitting in the pending state
        to_wait = clones[0:NR_THREADS]
        to_cancel = clones[NR_THREADS:]

        # cancel pending clones and verify
        for clone in to_cancel:
            status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
            self.assertEqual(status["status"]["state"], "pending")
            self._fs_cmd("clone", "cancel", self.volname, clone)
            self._check_clone_canceled(clone)

        # let's cancel on-going clones. handle the case where some of the clones
        # _just_ complete (cancel of a completed clone fails with EINVAL; such
        # clones stay in to_wait and are removed without --force below).
        # iterate over a copy since to_wait is mutated inside the loop
        for clone in list(to_wait):
            try:
                self._fs_cmd("clone", "cancel", self.volname, clone)
                to_cancel.append(clone)
                to_wait.remove(clone)
            except CommandFailedError as ce:
                if ce.exitstatus != errno.EINVAL:
                    raise RuntimeError("invalid error code when cancelling on-going clone")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes (cancelled clones need --force; completed ones do not)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        for clone in to_wait:
            self._fs_cmd("subvolume", "rm", self.volname, clone)
        for clone in to_cancel:
            self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")

        # verify trash dir is clean
        self._wait_for_trash_empty()
6954
6955 def test_subvolume_snapshot_clone_different_groups(self):
6956 subvolume = self._generate_random_subvolume_name()
6957 snapshot = self._generate_random_snapshot_name()
6958 clone = self._generate_random_clone_name()
6959 s_group, c_group = self._generate_random_group_name(2)
6960
6961 # create groups
6962 self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
6963 self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
6964
6965 # create subvolume
6966 self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")
6967
6968 # do some IO
6969 self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
6970
6971 # snapshot subvolume
6972 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
6973
6974 # schedule a clone
6975 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
6976 '--group_name', s_group, '--target_group_name', c_group)
6977
6978 # check clone status
6979 self._wait_for_clone_to_complete(clone, clone_group=c_group)
6980
6981 # verify clone
6982 self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
6983
6984 # remove snapshot
6985 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
6986
6987 # remove subvolumes
6988 self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
6989 self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
6990
6991 # remove groups
6992 self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
6993 self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
6994
6995 # verify trash dir is clean
6996 self._wait_for_trash_empty()
6997
6998 def test_subvolume_snapshot_clone_fail_with_remove(self):
6999 subvolume = self._generate_random_subvolume_name()
7000 snapshot = self._generate_random_snapshot_name()
7001 clone1, clone2 = self._generate_random_clone_name(2)
7002
7003 pool_capacity = 32 * 1024 * 1024
7004 # number of files required to fill up 99% of the pool
7005 nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
7006
7007 # create subvolume
7008 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
7009
7010 # do some IO
7011 self._do_subvolume_io(subvolume, number_of_files=nr_files)
7012
7013 # snapshot subvolume
7014 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7015
7016 # add data pool
7017 new_pool = "new_pool"
7018 self.fs.add_data_pool(new_pool)
7019
7020 self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
7021 "max_bytes", "{0}".format(pool_capacity // 4))
7022
7023 # schedule a clone
7024 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
7025
7026 # check clone status -- this should dramatically overshoot the pool quota
7027 self._wait_for_clone_to_complete(clone1)
7028
7029 # verify clone
7030 self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)
7031
7032 # wait a bit so that subsequent I/O will give pool full error
7033 time.sleep(120)
7034
7035 # schedule a clone
7036 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)
7037
7038 # check clone status
7039 self._wait_for_clone_to_fail(clone2)
7040
7041 # remove snapshot
7042 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7043
7044 # remove subvolumes
7045 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7046 self._fs_cmd("subvolume", "rm", self.volname, clone1)
7047 try:
7048 self._fs_cmd("subvolume", "rm", self.volname, clone2)
7049 except CommandFailedError as ce:
7050 if ce.exitstatus != errno.EAGAIN:
7051 raise RuntimeError("invalid error code when trying to remove failed clone")
7052 else:
7053 raise RuntimeError("expected error when removing a failed clone")
7054
7055 # ... and with force, failed clone can be removed
7056 self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")
7057
7058 # verify trash dir is clean
7059 self._wait_for_trash_empty()
7060
7061 def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
7062 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7063 snapshot = self._generate_random_snapshot_name()
7064 clone = self._generate_random_clone_name()
7065
7066 # create subvolumes
7067 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
7068 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")
7069
7070 # do some IO
7071 self._do_subvolume_io(subvolume1, number_of_files=32)
7072
7073 # snapshot subvolume
7074 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)
7075
7076 # schedule a clone with target as subvolume2
7077 try:
7078 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
7079 except CommandFailedError as ce:
7080 if ce.exitstatus != errno.EEXIST:
7081 raise RuntimeError("invalid error code when cloning to existing subvolume")
7082 else:
7083 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
7084
7085 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
7086
7087 # schedule a clone with target as clone
7088 try:
7089 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
7090 except CommandFailedError as ce:
7091 if ce.exitstatus != errno.EEXIST:
7092 raise RuntimeError("invalid error code when cloning to existing clone")
7093 else:
7094 raise RuntimeError("expected cloning to fail if the target is an existing clone")
7095
7096 # check clone status
7097 self._wait_for_clone_to_complete(clone)
7098
7099 # verify clone
7100 self._verify_clone(subvolume1, snapshot, clone)
7101
7102 # remove snapshot
7103 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)
7104
7105 # remove subvolumes
7106 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7107 self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
7108 self._fs_cmd("subvolume", "rm", self.volname, clone)
7109
7110 # verify trash dir is clean
7111 self._wait_for_trash_empty()
7112
    def test_subvolume_snapshot_clone_pool_layout(self):
        """Clone a snapshot with an explicit '--pool_layout' and verify the
        clone's root directory layout points at the requested data pool."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # add data pool (keep the returned pool id for the old-kernel check below)
        new_pool = "new_pool"
        newid = self.fs.add_data_pool(new_pool)

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # schedule a clone, targeting the new data pool
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # the layout xattr on the clone root must name the new pool; older
        # kernels report the pool id instead of its name, hence the fallback
        subvol_path = self._get_subvolume_path(self.volname, clone)
        desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid) # old kernel returns id

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7156
    def test_subvolume_snapshot_clone_under_group(self):
        """Clone a snapshot of a default-group subvolume into a custom
        subvolume group via '--target_group_name' and verify the result."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()
        group = self._generate_random_group_name()

        # create subvolume (in the default group)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # create the target group for the clone
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # schedule a clone into the custom group
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone, clone_group=group)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, clone_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes (the clone lives under 'group')
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7196
    def test_subvolume_snapshot_clone_with_attrs(self):
        """Clone a snapshot after mutating the source subvolume's uid/gid/mode
        and verify the clone picks up the attributes captured in the snapshot,
        not the later ones."""
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # attributes at snapshot time ...
        mode = "777"
        uid  = "1000"
        gid  = "1000"
        # ... and the ones applied after the snapshot is taken
        new_uid  = "1001"
        new_gid  = "1001"
        new_mode = "700"

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # change subvolume attrs (to ensure clone picks up snapshot attrs)
        self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7239
    def test_subvolume_snapshot_clone_with_upgrade(self):
        """
        yet another poor man's upgrade test -- rather than going through a full
        upgrade cycle, emulate old types subvolumes by going through the wormhole
        and verify clone operation.
        further ensure that a legacy volume is not updated to v2, but clone is.
        """
        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # emulate a old-fashioned subvolume by creating its directory tree
        # directly, bypassing the volumes module
        createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
        self.mount_a.run_shell_payload(f"sudo mkdir -p -m 777 {createpath}", omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=64)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)

        # Insert delay at the beginning of snapshot clone, so the snapshot
        # removal below reliably races with an in-progress clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # snapshot should not be deletable now (EAGAIN while a clone is pending)
        try:
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
        else:
            self.fail("expected removing source snapshot of a clone to fail")

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_version=1)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        # ensure metadata file is in v2 location, with required version v2
        self._assert_meta_location_and_version(self.volname, clone)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7300
7301 def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
7302 """
7303 Validate 'max_concurrent_clones' config option
7304 """
7305
7306 # get the default number of cloner threads
7307 default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7308 self.assertEqual(default_max_concurrent_clones, 4)
7309
7310 # Increase number of cloner threads
7311 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
7312 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7313 self.assertEqual(max_concurrent_clones, 6)
7314
7315 # Decrease number of cloner threads
7316 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7317 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7318 self.assertEqual(max_concurrent_clones, 2)
7319
    def test_subvolume_snapshot_config_snapshot_clone_delay(self):
        """
        Validate 'snapshot_clone_delay' config option
        """

        # get the default delay before starting the clone
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 0)

        # Insert delay of 2 seconds at the beginning of the snapshot clone
        self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
        default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
        self.assertEqual(default_timeout, 2)

        # Decrease number of cloner threads
        # NOTE(review): this tail exercises 'max_concurrent_clones', which is
        # unrelated to the snapshot_clone_delay option this test validates --
        # it looks like copy-paste residue from the reconf test above; confirm
        # whether it belongs here.
        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
        max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
        self.assertEqual(max_concurrent_clones, 2)
7338
    def test_subvolume_under_group_snapshot_clone(self):
        """Clone a snapshot of a subvolume that lives in a custom group
        (source group given via '--group_name') into the default group."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume inside the group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")

        # do some IO
        self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)

        # schedule a clone; the clone target lands in the default group
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone, source_group=group)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7378
7379
7380 class TestMisc(TestVolumesHelper):
7381 """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        """Verify the mgr's libcephfs connection is expired after it idles."""
        # unmount any cephfs mounts so only the mgr's own session can remain
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)
7397
    def test_mgr_eviction(self):
        """Verify the mgr's cephfs session is evicted when the active mgr fails."""
        # unmount any cephfs mounts so only the mgr's own session can remain
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])
7415
    def test_names_can_only_be_goodchars(self):
        """
        Test that creating volumes, subvolumes and subvolume groups fails when
        their names use characters beyond [a-zA-Z0-9 -_.].
        """
        volname, badname = 'testvol', 'abcd@#'

        # bad volume name must be rejected; then create a good one to host
        # the subvolume/group attempts below
        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        # clean up the volume created above
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
7433
    def test_subvolume_ops_on_nonexistent_vol(self):
        """Every subvolume, snapshot, clone and group operation issued against
        a non-existent volume must fail with ENOENT."""
        # tests the fs subvolume operations on non existing volume

        volname = "non_existent_subvolume"

        # try subvolume operations; resize/pin/ls need op-specific arguments
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try, clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))
7502
    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify if they are
        accessible.
        further ensure that a legacy volume is not updated to v2.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # create group
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline

        # and... the subvolume path returned should be what we created behind the scene
        # (createpath starts with "./", the returned path with "/", hence [1:])
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7548
7549 def test_subvolume_no_upgrade_v1_sanity(self):
7550 """
7551 poor man's upgrade test -- theme continues...
7552
7553 This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
7554 a series of operations on the v1 subvolume to ensure they work as expected.
7555 """
7556 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
7557 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
7558 "type", "uid", "features", "state"]
7559 snap_md = ["created_at", "data_pool", "has_pending_clones"]
7560
7561 subvolume = self._generate_random_subvolume_name()
7562 snapshot = self._generate_random_snapshot_name()
7563 clone1, clone2 = self._generate_random_clone_name(2)
7564 mode = "777"
7565 uid = "1000"
7566 gid = "1000"
7567
7568 # emulate a v1 subvolume -- in the default group
7569 subvolume_path = self._create_v1_subvolume(subvolume)
7570
7571 # getpath
7572 subvolpath = self._get_subvolume_path(self.volname, subvolume)
7573 self.assertEqual(subvolpath, subvolume_path)
7574
7575 # ls
7576 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
7577 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
7578 self.assertEqual(subvolumes[0]['name'], subvolume,
7579 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
7580
7581 # info
7582 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
7583 for md in subvol_md:
7584 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
7585
7586 self.assertEqual(subvol_info["state"], "complete",
7587 msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
7588 self.assertEqual(len(subvol_info["features"]), 2,
7589 msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
7590 for feature in ['snapshot-clone', 'snapshot-autoprotect']:
7591 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
7592
7593 # resize
7594 nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
7595 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
7596 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
7597 for md in subvol_md:
7598 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
7599 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
7600
7601 # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
7602 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
7603
7604 # do some IO
7605 self._do_subvolume_io(subvolume, number_of_files=8)
7606
7607 # snap-create
7608 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7609
7610 # clone
7611 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
7612
7613 # check clone status
7614 self._wait_for_clone_to_complete(clone1)
7615
7616 # ensure clone is v2
7617 self._assert_meta_location_and_version(self.volname, clone1, version=2)
7618
7619 # verify clone
7620 self._verify_clone(subvolume, snapshot, clone1, source_version=1)
7621
7622 # clone (older snapshot)
7623 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
7624
7625 # check clone status
7626 self._wait_for_clone_to_complete(clone2)
7627
7628 # ensure clone is v2
7629 self._assert_meta_location_and_version(self.volname, clone2, version=2)
7630
7631 # verify clone
7632 # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
7633 #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
7634
7635 # snap-info
7636 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
7637 for md in snap_md:
7638 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
7639 self.assertEqual(snap_info["has_pending_clones"], "no")
7640
7641 # snap-ls
7642 subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
7643 self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots)))
7644 snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
7645 for name in [snapshot, 'fake']:
7646 self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
7647
7648 # snap-rm
7649 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7650 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
7651
7652 # ensure volume is still at version 1
7653 self._assert_meta_location_and_version(self.volname, subvolume, version=1)
7654
7655 # rm
7656 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7657 self._fs_cmd("subvolume", "rm", self.volname, clone1)
7658 self._fs_cmd("subvolume", "rm", self.volname, clone2)
7659
7660 # verify trash dir is clean
7661 self._wait_for_trash_empty()
7662
    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades are not done automatically due to various states of v1
        """
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshot ('fake' is created by _create_v1_subvolume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolume; subvolume3 is still mid-clone, so plain rm must
        # fail with EAGAIN and only '--force' may remove it
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7720
    def test_subvolume_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades work
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group; no snapshot, so the
        # auto-upgrade on access is allowed to proceed
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version retained as v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7752
    def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate handcrafted .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol1, subvol2 = self._generate_random_subvolume_name(2)

        # emulate a old-fashioned subvolume in the default group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create v2 subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvol2)

        # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
        # .meta into legacy subvol1's root
        subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
        self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False)

        # Upgrade legacy subvol1 to v1
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()

        # the subvolume path returned should not be of subvol2 from handcrafted
        # .meta file ([1:] drops createpath's leading ".")
        self.assertEqual(createpath1[1:], subvolpath1)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)

        # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
        # path whose '.meta' file is copied to subvol1 root
        authid1 = "alice"
        self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)

        # Validate that the mds path added is of subvol1 and not of subvol2
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
        self.assertEqual("client.alice", out[0]["entity"])
        self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol1)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2)

        # verify trash dir is clean
        self._wait_for_trash_empty()
7805
    def test_binary_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate binary .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create unparseable binary .meta file on legacy subvol's root
        meta_contents = os.urandom(4096)
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore unparseable binary .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7848
7849 def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
7850 """
7851 Validate unparseable text .meta file on legacy subvol root doesn't break the system
7852 on legacy subvol upgrade to v1
7853 poor man's upgrade test -- theme continues...
7854 """
7855 subvol = self._generate_random_subvolume_name()
7856 group = self._generate_random_group_name()
7857
7858 # emulate a old-fashioned subvolume -- in a custom group
7859 createpath = os.path.join(".", "volumes", group, subvol)
7860 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
7861
7862 # add required xattrs to subvolume
7863 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7864 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7865
7866 # Create unparseable text .meta file on legacy subvol's root
7867 meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
7868 meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
7869 self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)
7870
7871 # Upgrade legacy subvol to v1
7872 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
7873 self.assertNotEqual(subvolpath, None)
7874 subvolpath = subvolpath.rstrip()
7875
7876 # The legacy subvolume path should be returned for subvol.
7877 # Should ignore unparseable binary .meta file in subvol's root
7878 self.assertEqual(createpath[1:], subvolpath)
7879
7880 # ensure metadata file is in legacy location, with required version v1
7881 self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
7882
7883 # remove subvolume
7884 self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
7885
7886 # verify trash dir is clean
7887 self._wait_for_trash_empty()
7888
7889 # remove group
7890 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7891
class TestPerModuleFinsherThread(TestVolumesHelper):
    """
    Per-module finisher thread tests related to mgr/volume commands.
    Used in conjunction with check_counter with a minimum value of 4,
    since four subvolume commands are run.
    """
    def test_volumes_module_finisher_thread(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # create a subvolume group to hold the subvolumes
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create three subvolumes inside the group
        for sv in (subvol1, subvol2, subvol3):
            self._fs_cmd("subvolume", "create", self.volname, sv, "--group_name", group)

        # remove the subvolumes, then the group itself
        for sv in (subvol1, subvol2, subvol3):
            self._fs_cmd("subvolume", "rm", self.volname, sv, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()