import os
import json
import time
import errno
import random
import logging
import collections
import uuid
import unittest
from hashlib import md5
from textwrap import dedent
from io import StringIO

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumesHelper(CephFSTestCase):
    """Helper class for testing FS volume, subvolume group and subvolume operations."""
    TEST_VOLUME_PREFIX = "volume"
    TEST_SUBVOLUME_PREFIX = "subvolume"
    TEST_GROUP_PREFIX = "group"
    TEST_SNAPSHOT_PREFIX = "snapshot"
    TEST_CLONE_PREFIX = "clone"
    TEST_FILE_NAME_PREFIX = "subvolume_file"

    # for filling subvolume with data
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    # io defaults
    DEFAULT_FILE_SIZE = 1  # MB
    DEFAULT_NUMBER_OF_FILES = 1024

    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)

    def _raw_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)

    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        while check < timo:
            result = json.loads(self._fs_cmd(*args))
            if result["status"]["state"] == state:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

    def _get_clone_status(self, clone, clone_group=None):
        args = ["clone", "status", self.volname, clone]
        if clone_group:
            args.append(clone_group)
        args = tuple(args)
        result = json.loads(self._fs_cmd(*args))
        return result

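    # NOTE: a "fs clone status" payload is assumed to look roughly like the
    # sketch below; only "status.state" is consumed by the helpers above.
    #
    #   {
    #       "status": {
    #           "state": "in-progress",
    #           "source": {
    #               "volume": "vol", "subvolume": "sv", "snapshot": "snap"
    #           }
    #       }
    #   }
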
    def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("complete", clone, clone_group, timo)

    def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("failed", clone, clone_group, timo)

    def _wait_for_clone_to_be_in_progress(self, clone, clone_group=None, timo=120):
        self.__check_clone_state("in-progress", clone, clone_group, timo)

    def _check_clone_canceled(self, clone, clone_group=None):
        self.__check_clone_state("canceled", clone, clone_group, timo=1)

    def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version):
        if source_version == 2:
            # v2
            if subvol_path is not None:
                (base_path, uuid_str) = os.path.split(subvol_path)
            else:
                (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group))
            return os.path.join(base_path, ".snap", snapshot, uuid_str)

        # v1
        base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
        return os.path.join(base_path, ".snap", snapshot)

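    # NOTE: on-disk layouts assumed by the path construction above (sketch):
    #   v1: /volumes/<group>/<subvol>         -> snap at <subvol>/.snap/<name>
    #   v2: /volumes/<group>/<subvol>/<uuid>  -> snap at <subvol>/.snap/<name>/<uuid>
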
    def _verify_clone_attrs(self, source_path, clone_path):
        path1 = source_path
        path2 = clone_path

        p = self.mount_a.run_shell(["find", path1])
        paths = p.stdout.getvalue().strip().split()

        # for each entry in the source and the clone (sink), verify certain
        # inode attributes: inode type, mode, ownership, [am]time.
        for src_path in paths:
            sink_entry = src_path[len(path1)+1:]
            sink_path = os.path.join(path2, sink_entry)

            # mode+type
            sval = int(self.mount_a.run_shell(['stat', '-c', '%f', src_path]).stdout.getvalue().strip(), 16)
            cval = int(self.mount_a.run_shell(['stat', '-c', '%f', sink_path]).stdout.getvalue().strip(), 16)
            self.assertEqual(sval, cval)

            # ownership
            sval = int(self.mount_a.run_shell(['stat', '-c', '%u', src_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%u', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            sval = int(self.mount_a.run_shell(['stat', '-c', '%g', src_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%g', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

            # inode timestamps
            # do not check atime as the kclient will generally not update it
            # the way ceph-fuse does.
            sval = int(self.mount_a.run_shell(['stat', '-c', '%Y', src_path]).stdout.getvalue().strip())
            cval = int(self.mount_a.run_shell(['stat', '-c', '%Y', sink_path]).stdout.getvalue().strip())
            self.assertEqual(sval, cval)

    def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool):
        # verify the following clone root attributes: quota, data_pool and
        # pool_namespace; the remaining attributes of the clone root are
        # validated in _verify_clone_attrs

        clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group))

        # verify quota is inherited from source snapshot
        src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes")
        # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
        if isinstance(self.mount_a, FuseMount):
            self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota))

        if clone_pool:
            # verify pool is set as per request
            self.assertEqual(clone_info["data_pool"], clone_pool)
        else:
            # verify pool and pool namespace are inherited from snapshot
            self.assertEqual(clone_info["data_pool"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool"))
            self.assertEqual(clone_info["pool_namespace"],
                             self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace"))

    def _verify_clone(self, subvolume, snapshot, clone,
                      source_group=None, clone_group=None, clone_pool=None,
                      subvol_path=None, source_version=2, timo=120):
        # pass in subvol_path (the subvolume path when the snapshot was taken)
        # when the subvolume is removed but its snapshots are retained for
        # clone verification
        path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version)
        path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)

        check = 0
        # TODO: currently snapshot rentries are not stable if snapshot source entries
        # are removed, https://tracker.ceph.com/issues/46747
        while check < timo and subvol_path is None:
            val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
            val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
            if val1 == val2:
                break
            check += 1
            time.sleep(1)
        self.assertTrue(check < timo)

        self._verify_clone_root(path1, path2, clone, clone_group, clone_pool)
        self._verify_clone_attrs(path1, path2)

    def _generate_random_volume_name(self, count=1):
        n = self.volume_start
        volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.volume_start += count
        return volumes[0] if count == 1 else volumes

    def _generate_random_subvolume_name(self, count=1):
        n = self.subvolume_start
        subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.subvolume_start += count
        return subvolumes[0] if count == 1 else subvolumes

    def _generate_random_group_name(self, count=1):
        n = self.group_start
        groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.group_start += count
        return groups[0] if count == 1 else groups

    def _generate_random_snapshot_name(self, count=1):
        n = self.snapshot_start
        snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.snapshot_start += count
        return snaps[0] if count == 1 else snaps

    def _generate_random_clone_name(self, count=1):
        n = self.clone_start
        clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)]
        self.clone_start += count
        return clones[0] if count == 1 else clones

    def _enable_multi_fs(self):
        self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")

    def _create_or_reuse_test_volume(self):
        result = json.loads(self._fs_cmd("volume", "ls"))
        if len(result) == 0:
            self.vol_created = True
            self.volname = self._generate_random_volume_name()
            self._fs_cmd("volume", "create", self.volname)
        else:
            self.volname = result[0]['name']

    def _get_volume_info(self, vol_name, human_readable=False):
        if human_readable:
            args = ["volume", "info", vol_name, human_readable]
        else:
            args = ["volume", "info", vol_name]
        args = tuple(args)
        vol_md = self._fs_cmd(*args)
        return vol_md

    def _get_subvolume_group_path(self, vol_name, group_name):
        args = ("subvolumegroup", "getpath", vol_name, group_name)
        path = self._fs_cmd(*args)
        # remove the leading '/' and trailing whitespace
        return path[1:].rstrip()

    def _get_subvolume_group_info(self, vol_name, group_name):
        args = ["subvolumegroup", "info", vol_name, group_name]
        args = tuple(args)
        group_md = self._fs_cmd(*args)
        return group_md

    def _get_subvolume_path(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "getpath", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        path = self._fs_cmd(*args)
        # remove the leading '/' and trailing whitespace
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None):
        args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        snap_md = self._fs_cmd(*args)
        return snap_md

    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

    def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        if pool is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)

        if pool_namespace is not None:
            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)

    def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        # mode
        self.mount_a.run_shell(['sudo', 'chmod', mode, subvolpath], omit_sudo=False)

        # ownership
        self.mount_a.run_shell(['sudo', 'chown', uid, subvolpath], omit_sudo=False)
        self.mount_a.run_shell(['sudo', 'chgrp', gid, subvolpath], omit_sudo=False)

    def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                         number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
        # get subvolume path for IO
        args = ["subvolume", "getpath", self.volname, subvolume]
        if subvolume_group:
            args.append(subvolume_group)
        args = tuple(args)
        subvolpath = self._fs_cmd(*args)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath[1:].rstrip()  # remove "/" prefix and any trailing newline

        io_path = subvolpath
        if create_dir:
            io_path = os.path.join(subvolpath, create_dir)
            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")

        log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
        for i in range(number_of_files):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size)

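    # Example (hypothetical names): fill a grouped subvolume with 10 files of
    # 2MB each under a fresh sub-directory:
    #
    #   self._do_subvolume_io("sv1", subvolume_group="grp1",
    #                         create_dir="dir0", number_of_files=10, file_size=2)
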
    def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None):
        subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)

        reg_file = "regfile.0"
        dir_path = os.path.join(subvolpath, "dir.0")
        sym_path1 = os.path.join(subvolpath, "sym.0")
        # this symlink's ownership will be changed
        sym_path2 = os.path.join(dir_path, "sym.0")

        self.mount_a.run_shell(["mkdir", dir_path])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
        # flip ownership to nobody. assumption: nobody's id is 65534
        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)

    def _wait_for_trash_empty(self, timeout=60):
        # XXX: construct the trash dir path (note that there is no mgr
        # [sub]volume interface for this).
        trashdir = os.path.join("./", "volumes", "_deleting")
        self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)

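    # NOTE: subvolume deletion is asynchronous -- "subvolume rm" moves the
    # subvolume under volumes/_deleting and a background purge thread in the
    # mgr removes it, hence the polling above.
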
    def _wait_for_subvol_trash_empty(self, subvol, group="_nogroup", timeout=30):
        trashdir = os.path.join("./", "volumes", group, subvol, ".trash")
        try:
            self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
        except CommandFailedError as ce:
            # a missing trash dir means there is nothing to wait for;
            # anything else is a real failure
            if ce.exitstatus != errno.ENOENT:
                raise

    def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
        if legacy:
            subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
            m = md5()
            m.update(("/"+subvol_path).encode('utf-8'))
            meta_filename = "{0}.meta".format(m.digest().hex())
            metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
        else:
            group = subvol_group if subvol_group is not None else '_nogroup'
            metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")

        out = self.mount_a.run_shell(['sudo', 'cat', metapath], omit_sudo=False)
        lines = out.stdout.getvalue().strip().split('\n')
        sv_version = -1
        for line in lines:
            if line == "version = " + str(version):
                sv_version = version
                break
        self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
            version, sv_version, metapath))

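    # NOTE: for reference, a subvolume ".meta" file is an INI-style document
    # along these lines (a sketch; the key set is not exhaustive):
    #
    #   [GLOBAL]
    #   version = 2
    #   type = subvolume
    #   path = /volumes/_nogroup/<subvol>/<uuid>
    #   state = complete
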
    def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
        group = subvol_group if subvol_group is not None else '_nogroup'
        basepath = os.path.join("volumes", group, subvol_name)
        uuid_str = str(uuid.uuid4())
        createpath = os.path.join(basepath, uuid_str)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # create a v1 snapshot, to prevent auto upgrades
        if has_snapshot:
            snappath = os.path.join(createpath, ".snap", "fake")
            self.mount_a.run_shell(['sudo', 'mkdir', '-p', snappath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create a v1 .meta file
        meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
        if state == 'pending':
            # add a fake clone source
            meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
        meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
        return createpath

    def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
        group = subvol_group if subvol_group is not None else '_nogroup'
        trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
        if create:
            self.mount_a.run_shell(['sudo', 'mkdir', '-p', trashpath], omit_sudo=False)
        else:
            self.mount_a.run_shell(['sudo', 'rmdir', trashpath], omit_sudo=False)

    def _configure_guest_auth(self, guest_mount, authid, key):
        """
        Set up auth credentials for a guest client.
        """
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{authid}]
        key = {key}

        """.format(authid=authid, key=key))

        guest_mount.client_id = authid
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)
        # Add a guest client section to the ceph config file.
        self.config_set("client.{0}".format(authid), "debug client", 20)
        self.config_set("client.{0}".format(authid), "debug objecter", 20)
        self.set_conf("client.{0}".format(authid),
                      "keyring", guest_mount.get_keyring_path())

    def _auth_metadata_get(self, filedata):
        """
        Return a deserialized JSON object, or None.
        """
        try:
            data = json.loads(filedata)
        except json.decoder.JSONDecodeError:
            data = None
        return data

    def setUp(self):
        super(TestVolumesHelper, self).setUp()
        self.volname = None
        self.vol_created = False
        self._enable_multi_fs()
        self._create_or_reuse_test_volume()
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self.volume_start = random.randint(1, (1<<20))
        self.subvolume_start = random.randint(1, (1<<20))
        self.group_start = random.randint(1, (1<<20))
        self.snapshot_start = random.randint(1, (1<<20))
        self.clone_start = random.randint(1, (1<<20))

    def tearDown(self):
        if self.vol_created:
            self._delete_test_volume()
        super(TestVolumesHelper, self).tearDown()


class TestVolumes(TestVolumesHelper):
    """Tests for FS volume operations."""
    def test_volume_create(self):
        """
        That a volume can be created and is cleaned up afterwards.
        """
        volname = self._generate_random_volume_name()
        self._fs_cmd("volume", "create", volname)
        volumels = json.loads(self._fs_cmd("volume", "ls"))

        if volname not in [volume['name'] for volume in volumels]:
            raise RuntimeError("Error creating volume '{0}'".format(volname))

        # check that the pools were created with the correct config
        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
        pool_flags = {}
        for pool in pool_details:
            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")

        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
        for data_pool_id in volume_details['mdsmap']['data_pools']:
            self.assertIn("bulk", pool_flags[data_pool_id])
        meta_pool_id = volume_details['mdsmap']['metadata_pool']
        self.assertNotIn("bulk", pool_flags[meta_pool_id])

        # clean up
        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")

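    # NOTE: the pool entries consumed above are assumed to carry at least
    # these fields in the "--format=json" output (illustrative):
    #
    #   {"pool_id": 1, "flags_names": "hashpspool,bulk", ...}
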
    def test_volume_ls(self):
        """
        That the existing and the newly created volumes can be listed, and
        that the test finally cleans up.
        """
        vls = json.loads(self._fs_cmd("volume", "ls"))
        volumes = [volume['name'] for volume in vls]

        # create new volumes and add them to the existing list of volumes
        volumenames = self._generate_random_volume_name(2)
        for volumename in volumenames:
            self._fs_cmd("volume", "create", volumename)
        volumes.extend(volumenames)

        # list volumes
        try:
            volumels = json.loads(self._fs_cmd('volume', 'ls'))
            if len(volumels) == 0:
                raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
            else:
                volnames = [volume['name'] for volume in volumels]
                if collections.Counter(volnames) != collections.Counter(volumes):
                    raise RuntimeError("Error creating or listing volumes")
        finally:
            # clean up
            for volume in volumenames:
                self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it")

    def test_volume_rm(self):
        """
        That the volume can only be removed when --yes-i-really-mean-it is used
        and verify that the deleted volume is not listed anymore.
        """
        for m in self.mounts:
            m.umount_wait()
        try:
            self._fs_cmd("volume", "rm", self.volname)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EPERM:
                raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
                                   "but it failed with {0}".format(ce.exitstatus))
            else:
                self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

                # check if it's gone
                volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
                if self.volname in [volume['name'] for volume in volumes]:
                    raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
                                       "The volume {0} was not removed.".format(self.volname))
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")

    def test_volume_rm_arbitrary_pool_removal(self):
        """
        That an arbitrary pool added to the volume out of band is removed
        successfully on volume removal.
        """
        for m in self.mounts:
            m.umount_wait()
        new_pool = "new_pool"
        # add arbitrary data pool
        self.fs.add_data_pool(new_pool)
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames)

        # check if osd pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools)

    def test_volume_rm_when_mon_delete_pool_false(self):
        """
        That the volume can only be removed when mon_allow_pool_delete is set
        to true and verify that the pools are removed after volume deletion.
        """
        for m in self.mounts:
            m.umount_wait()
        self.config_set('mon', 'mon_allow_pool_delete', False)
        try:
            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "expected the 'fs volume rm' command to fail with EPERM, "
                             "but it failed with {0}".format(ce.exitstatus))
        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
        self.config_set('mon', 'mon_allow_pool_delete', True)
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")

        # check if fs is gone
        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
        volnames = [volume['name'] for volume in volumes]
        self.assertNotIn(self.volname, volnames,
                         "volume {0} exists after removal".format(self.volname))
        # check if pools are gone
        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
        for pool in vol_status["pools"]:
            self.assertNotIn(pool["name"], pools,
                             "pool {0} exists after volume removal".format(pool["name"]))

    def test_volume_rename(self):
        """
        That a volume, its file system and its pools can be renamed.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        # pool names changed
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_idempotency(self):
        """
        That volume rename is idempotent.
        """
        for m in self.mounts:
            m.umount_wait()
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        self._fs_cmd("volume", "rename", oldvolname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())

    def test_volume_rename_fails_without_confirmation_flag(self):
        """
        That renaming a volume fails without the --yes-i-really-mean-it flag.
        """
        newvolname = self._generate_random_volume_name()
        try:
            self._fs_cmd("volume", "rename", self.volname, newvolname)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EPERM,
                             "invalid error code on renaming a FS volume without the "
                             "'--yes-i-really-mean-it' flag")
        else:
            self.fail("expected renaming of FS volume to fail without the "
                      "'--yes-i-really-mean-it' flag")

    def test_volume_rename_for_more_than_one_data_pool(self):
        """
        That renaming a volume with more than one data pool does not change
        the names of the data pools.
        """
        for m in self.mounts:
            m.umount_wait()
        self.fs.add_data_pool('another-data-pool')
        oldvolname = self.volname
        newvolname = self._generate_random_volume_name()
        self.fs.get_pool_names(refresh=True)
        orig_data_pool_names = list(self.fs.data_pools.values())
        new_metadata_pool = f"cephfs.{newvolname}.meta"
        self._fs_cmd("volume", "rename", self.volname, newvolname,
                     "--yes-i-really-mean-it")
        volumels = json.loads(self._fs_cmd('volume', 'ls'))
        volnames = [volume['name'] for volume in volumels]
        # volume name changed
        self.assertIn(newvolname, volnames)
        self.assertNotIn(oldvolname, volnames)
        self.fs.get_pool_names(refresh=True)
        # metadata pool name changed
        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
        # data pool names unchanged
        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))

    def test_volume_info(self):
        """
        Tests the 'fs volume info' command
        """
        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
        group = self._generate_random_group_name()
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertEqual(vol_info["used_size"], 0,
                         "Size should be zero when volumes directory is empty")

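    # NOTE: illustrative "fs volume info" output consumed by the assertions
    # above (the key set is what the tests rely on; values are made up):
    #
    #   {
    #       "mon_addrs": ["192.168.1.7:40977"],
    #       "pending_subvolume_deletions": 0,
    #       "pools": {"data": [...], "metadata": [...]},
    #       "used_size": 0
    #   }
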
    def test_volume_info_pending_subvol_deletions(self):
        """
        Tests the pending_subvolume_deletions field in the 'fs volume info' command
        """
        subvolname = self._generate_random_subvolume_name()
        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--mode=777")
        # create 3K zero byte files
        self._do_subvolume_io(subvolname, number_of_files=3000, file_size=0)
        # delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        self.assertNotEqual(vol_info['pending_subvolume_deletions'], 0,
                            "pending_subvolume_deletions should be non-zero")
        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_volume_info_without_subvolumegroup(self):
        """
        Tests the 'fs volume info' command without a subvolume group
        """
        vol_fields = ["pools", "mon_addrs"]
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        self.assertNotIn("used_size", vol_info,
                         "'used_size' should not be present in absence of subvolumegroup")
        self.assertNotIn("pending_subvolume_deletions", vol_info,
                         "'pending_subvolume_deletions' should not be present in absence"
                         " of subvolumegroup")

    def test_volume_info_with_human_readable_flag(self):
        """
        Tests the 'fs volume info --human_readable' command
        """
        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
        group = self._generate_random_group_name()
        # create subvolumegroup
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
        assert vol_info["used_size"][-1] in units, "unit suffix in used_size is absent"
        assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
        assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
        assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
        assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
        self.assertEqual(int(vol_info["used_size"]), 0,
                         "Size should be zero when volumes directory is empty")

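    # NOTE: with --human_readable the size fields come back as strings
    # carrying a unit suffix, e.g. (illustrative) "used_size": "24M",
    # "avail": "28.6G"; a plain space appears in the units list above,
    # presumably the suffix used for byte-sized values.
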
    def test_volume_info_with_human_readable_flag_without_subvolumegroup(self):
        """
        Tests the 'fs volume info --human_readable' command without a subvolume group
        """
        vol_fields = ["pools", "mon_addrs"]
        # get volume metadata
        vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
        for md in vol_fields:
            self.assertIn(md, vol_info,
                          f"'{md}' key not present in metadata of volume")
        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
        assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
        assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
        assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
        assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
        self.assertNotIn("used_size", vol_info,
                         "'used_size' should not be present in absence of subvolumegroup")
        self.assertNotIn("pending_subvolume_deletions", vol_info,
                         "'pending_subvolume_deletions' should not be present in absence"
                         " of subvolumegroup")


class TestSubvolumeGroups(TestVolumesHelper):
    """Tests for FS subvolume group operations."""
    def test_default_uid_gid_subvolume_group(self):
        group = self._generate_random_group_name()
        expected_uid = 0
        expected_gid = 0

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)
        group_path = self._get_subvolume_group_path(self.volname, group)

        # check group's uid and gid
        stat = self.mount_a.stat(group_path)
        self.assertEqual(stat['st_uid'], expected_uid)
        self.assertEqual(stat['st_gid'], expected_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_nonexistent_subvolume_group_create(self):
        subvolume = self._generate_random_subvolume_name()
        group = "non_existent_group"

        # try creating a subvolume in a nonexistent group
        try:
            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolume create' command to fail")

    def test_nonexistent_subvolume_group_rm(self):
        group = "non_existent_group"

        # try removing a nonexistent subvolume group
        try:
            self._fs_cmd("subvolumegroup", "rm", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")

    def test_subvolume_group_create_with_auto_cleanup_on_fail(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        with self.assertRaises(CommandFailedError):
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)

        # check whether group path is cleaned up
        try:
            self._fs_cmd("subvolumegroup", "getpath", self.volname, group)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")

    def test_subvolume_group_create_with_desired_data_pool_layout(self):
        group1, group2 = self._generate_random_group_name(2)

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)
        group1_path = self._get_subvolume_group_path(self.volname, group1)

        default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # create group specifying the new data pool as its pool layout
        self._fs_cmd("subvolumegroup", "create", self.volname, group2,
                     "--pool_layout", new_pool)
        group2_path = self._get_subvolume_group_path(self.volname, group2)

        desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_mode(self):
        group1, group2 = self._generate_random_group_name(2)
        # default mode
        expected_mode1 = "755"
        # desired mode
        expected_mode2 = "777"

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
        self._fs_cmd("subvolumegroup", "create", self.volname, group1)

        group1_path = self._get_subvolume_group_path(self.volname, group1)
        group2_path = self._get_subvolume_group_path(self.volname, group2)
        volumes_path = os.path.dirname(group1_path)

        # check group's mode
        actual_mode1 = self.mount_a.run_shell(['stat', '-c', '%a', group1_path]).stdout.getvalue().strip()
        actual_mode2 = self.mount_a.run_shell(['stat', '-c', '%a', group2_path]).stdout.getvalue().strip()
        actual_mode3 = self.mount_a.run_shell(['stat', '-c', '%a', volumes_path]).stdout.getvalue().strip()
        self.assertEqual(actual_mode1, expected_mode1)
        self.assertEqual(actual_mode2, expected_mode2)
        self.assertEqual(actual_mode3, expected_mode1)

        self._fs_cmd("subvolumegroup", "rm", self.volname, group1)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group2)

    def test_subvolume_group_create_with_desired_uid_gid(self):
        """
        That the subvolume group can be created with the desired uid and gid, and that
        its uid and gid match the expected values.
        """
        uid = 1000
        gid = 1000

        # create subvolume group
        subvolgroupname = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

        # make sure it exists
        subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
        self.assertNotEqual(subvolgrouppath, None)

        # verify the uid and gid
        suid = int(self.mount_a.run_shell(['stat', '-c', '%u', subvolgrouppath]).stdout.getvalue().strip())
        sgid = int(self.mount_a.run_shell(['stat', '-c', '%g', subvolgrouppath]).stdout.getvalue().strip())
        self.assertEqual(uid, suid)
        self.assertEqual(gid, sgid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)

    def test_subvolume_group_create_with_invalid_data_pool_layout(self):
        group = self._generate_random_group_name()
        data_pool = "invalid_pool"
        # create group with invalid data pool layout
        try:
            self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.EINVAL:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")

    def test_subvolume_group_create_with_size(self):
        # create group with size -- should set quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        self.assertEqual(group_info["bytes_quota"], 1000000000)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

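    # NOTE: the size argument to "subvolumegroup create" is assumed to land as
    # a "ceph.quota.max_bytes" xattr on the group directory, which is what the
    # "bytes_quota" field above reports.
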
    def test_subvolume_group_info(self):
        # tests the 'fs subvolumegroup info' command

        group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                    "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        for md in group_md:
            self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

        self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
        self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
        self.assertEqual(group_info["uid"], 0)
        self.assertEqual(group_info["gid"], 0)

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))

        # get group metadata after quota set
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        for md in group_md:
            self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

        self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
        self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name -- should be idempotent
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_mode(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with mode -- should set mode
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766")

        group_path = self._get_subvolume_group_path(self.volname, group)

        # check subvolumegroup's mode
        mode = self.mount_a.run_shell(['stat', '-c', '%a', group_path]).stdout.getvalue().strip()
        self.assertEqual(mode, "766")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_uid_gid(self):
        desired_uid = 1000
        desired_gid = 1000

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with uid/gid -- should set uid/gid
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid))

        group_path = self._get_subvolume_group_path(self.volname, group)

        # verify the uid and gid
        actual_uid = int(self.mount_a.run_shell(['stat', '-c', '%u', group_path]).stdout.getvalue().strip())
        actual_gid = int(self.mount_a.run_shell(['stat', '-c', '%g', group_path]).stdout.getvalue().strip())
        self.assertEqual(desired_uid, actual_uid)
        self.assertEqual(desired_gid, actual_gid)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_data_pool(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        group_path = self._get_subvolume_group_path(self.volname, group)

        default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
        new_pool = "new_pool"
        self.assertNotEqual(default_pool, new_pool)

        # add data pool
        newid = self.fs.add_data_pool(new_pool)

        # try creating w/ same subvolume group name with new data pool -- should set pool
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool)
        desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
        try:
            self.assertEqual(desired_pool, new_pool)
        except AssertionError:
            self.assertEqual(int(desired_pool), newid)  # old kernel returns id

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_create_idempotence_resize(self):
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # try creating w/ same subvolume group name with size -- should set quota
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # get group metadata
        group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
        self.assertEqual(group_info["bytes_quota"], 1000000000)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_group_quota_mds_path_restriction_to_group_path(self):
        """
        Tests subvolumegroup quota enforcement with mds path restriction set to the group.
        For quota to be enforced, read permission needs to be provided to the parent
        of the directory on which quota is set. Please see the tracker comment [1]
        [1] https://tracker.ceph.com/issues/55090#note-8
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # Create auth_id
        authid = "client.guest1"
        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", authid,
            "mds", "allow rw path=/volumes",
            "mgr", "allow rw",
            "osd", "allow rw tag cephfs *=*",
            "mon", "allow r",
            "--format=json-pretty"
        ))

        # Prepare guest_mount with the new authid
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

        # mount the subvolume
        mount_path = os.path.join("/", subvolpath)
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # create 99 files of 1MB
        guest_mount.run_shell_payload("mkdir -p dir1")
        for i in range(99):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)
        try:
            # write two more files of 1MB each to exceed the quota
            guest_mount.run_shell_payload("mkdir -p dir2")
            for i in range(2):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # wait for the quota to be enforced
            time.sleep(60)
            # create 400 files of 1MB to exceed the quota
            for i in range(400):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
                # sometimes quota enforcement takes time
                if i == 200:
                    time.sleep(60)
        except CommandFailedError:
            pass
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # clean up
        guest_mount.umount_wait()

        # delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self):
        """
        Tests subvolumegroup quota enforcement with mds path restriction set to the subvolume path.
        The quota should not be enforced because of the fourth limitation mentioned at
        https://docs.ceph.com/en/latest/cephfs/quota/#limitations
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        mount_path = os.path.join("/", subvolpath)

        # Create auth_id
        authid = "client.guest1"
        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", authid,
            "mds", f"allow rw path={mount_path}",
            "mgr", "allow rw",
            "osd", "allow rw tag cephfs *=*",
            "mon", "allow r",
            "--format=json-pretty"
        ))

        # Prepare guest_mount with the new authid
        guest_mount = self.mount_b
        guest_mount.umount_wait()

        # configure credentials for guest client
        self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

        # mount the subvolume
        guest_mount.mount_wait(cephfs_mntpt=mount_path)

        # create 99 files of 1MB
        guest_mount.run_shell_payload("mkdir -p dir1")
        for i in range(99):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)
        try:
            # write two more files of 1MB each to go past the quota
            guest_mount.run_shell_payload("mkdir -p dir2")
            for i in range(2):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # give the quota time to be enforced, if it were going to be
            time.sleep(60)
            # create 400 files of 1MB past the quota
            for i in range(400):
                filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
                guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
                # sometimes quota enforcement takes time
                if i == 200:
                    time.sleep(60)
        except CommandFailedError:
            self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed")

        # clean up
        guest_mount.umount_wait()

        # delete the subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

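    # NOTE: the two tests above differ only in the mds cap granted to the
    # guest (paths illustrative):
    #
    #   enforced:      "allow rw path=/volumes"                  (parent of the
    #                  quota dir is readable)
    #   not enforced:  "allow rw path=/volumes/<grp>/<sv>/<id>"  (parent is not
    #                  readable, so the client cannot see the quota)
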
    def test_subvolume_group_quota_exceeded_subvolume_removal(self):
        """
        Tests subvolume removal if its group quota is exceeded
        """
        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        try:
            # write two more files of 1MB each to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # wait for the quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400)
        except CommandFailedError:
            # delete the subvolume while the group quota is exceeded
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self):
        """
        Tests retained snapshot subvolume removal if its group quota is exceeded
        """
        group = self._generate_random_group_name()
        subvolname = self._generate_random_subvolume_name()
        snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

        osize = self.DEFAULT_FILE_SIZE*1024*1024*100
        # create group with 100MB quota
        self._fs_cmd("subvolumegroup", "create", self.volname, group,
                     "--size", str(osize), "--mode=777")

        # make sure it exists
        grouppath = self._get_subvolume_group_path(self.volname, group)
        self.assertNotEqual(grouppath, None)

        # create subvolume under the group
        self._fs_cmd("subvolume", "create", self.volname, subvolname,
                     "--group_name", group, "--mode=777")

        # make sure it exists
        subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
        self.assertNotEqual(subvolpath, None)

        # create 99 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

        # snapshot the subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group)
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group)

        try:
            # write two more files of 1MB each to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
            # wait for the quota to be enforced
            time.sleep(20)
            # create 400 files of 1MB to exceed the quota
            self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400)
        except CommandFailedError:
            # remove with snapshot retention
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots")
            # remove snapshot1
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group)
            # remove snapshot2 (should remove the subvolume)
            self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group)
            # verify subvolume trash is clean
            self._wait_for_subvol_trash_empty(subvolname, group=group)
        else:
            self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_subvolume_removal(self):
        """
        Tests subvolume removal if its group quota is set.
        """
        # create group with size -- should set quota
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # create subvolume under the group
        subvolname = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_legacy_subvolume_removal(self):
        """
        Tests legacy subvolume removal if its group quota is set.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate an old-fashioned subvolume -- in a custom group
        createpath1 = os.path.join(".", "volumes", group, subvolume)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()  # remove any trailing newline

        # and... the subvolume path returned should be what we created behind the scenes
        self.assertEqual(createpath1[1:], subvolpath1)

        # set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_group_quota_v1_subvolume_removal(self):
        """
        Tests v1 subvolume removal if its group quota is set.
        """
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in a custom group
        self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False)

        # set subvolumegroup quota on idempotent subvolumegroup creation
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

        # remove subvolume
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
        except CommandFailedError:
            self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

        # remove subvolumegroup
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

1382 def test_subvolume_group_resize_fail_invalid_size(self):
1383 """
1384 That a subvolume group cannot be resized to an invalid size, and that the quota remains unchanged.
1385 """
1386
1387 osize = self.DEFAULT_FILE_SIZE*1024*1024
1388 # create group with 1MB quota
1389 group = self._generate_random_group_name()
1390 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))
1391
1392 # make sure it exists
1393 grouppath = self._get_subvolume_group_path(self.volname, group)
1394 self.assertNotEqual(grouppath, None)
1395
1396 # try to resize the subvolume group with an invalid size of -10
1397 nsize = -10
1398 try:
1399 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1400 except CommandFailedError as ce:
1401 self.assertEqual(ce.exitstatus, errno.EINVAL,
1402 "invalid error code on resize of subvolume group with invalid size")
1403 else:
1404 self.fail("expected the 'fs subvolumegroup resize' command to fail")
1405
1406 # verify the quota did not change
1407 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1408 self.assertEqual(size, osize)
1409
1410 # remove group
1411 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1412
1413 def test_subvolume_group_resize_fail_zero_size(self):
1414 """
1415 That a subvolume group cannot be resized to a zero size, and that the quota remains unchanged.
1416 """
1417
1418 osize = self.DEFAULT_FILE_SIZE*1024*1024
1419 # create group with 1MB quota
1420 group = self._generate_random_group_name()
1421 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))
1422
1423 # make sure it exists
1424 grouppath = self._get_subvolume_group_path(self.volname, group)
1425 self.assertNotEqual(grouppath, None)
1426
1427 # try to resize the subvolume group with size 0
1428 nsize = 0
1429 try:
1430 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1431 except CommandFailedError as ce:
1432 self.assertEqual(ce.exitstatus, errno.EINVAL,
1433 "invalid error code on resize of subvolume group with invalid size")
1434 else:
1435 self.fail("expected the 'fs subvolumegroup resize' command to fail")
1436
1437 # verify the quota did not change
1438 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1439 self.assertEqual(size, osize)
1440
1441 # remove group
1442 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1443
1444 def test_subvolume_group_resize_quota_lt_used_size(self):
1445 """
1446 That a subvolume group can be resized to a size smaller than the current used size
1447 and the resulting quota matches the expected size.
1448 """
1449
1450 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
1451 # create group with 20MB quota
1452 group = self._generate_random_group_name()
1453 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1454 "--size", str(osize), "--mode=777")
1455
1456 # make sure it exists
1457 grouppath = self._get_subvolume_group_path(self.volname, group)
1458 self.assertNotEqual(grouppath, None)
1459
1460 # create subvolume under the group
1461 subvolname = self._generate_random_subvolume_name()
1462 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1463 "--group_name", group, "--mode=777")
1464
1465 # make sure it exists
1466 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1467 self.assertNotEqual(subvolpath, None)
1468
1469 # create one file of 10MB
1470 file_size=self.DEFAULT_FILE_SIZE*10
1471 number_of_files=1
1472 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
1473 number_of_files,
1474 file_size))
1475 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
1476 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
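# note: write_n_mb writes file_size MB into the named file on the mount, so
# the space consumed shows up below in the ceph.dir.rbytes vxattr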
1477
1478 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
1479
1480 # shrink the subvolume group
1481 nsize = usedsize // 2
1482 try:
1483 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1484 except CommandFailedError:
1485 self.fail("expected the 'fs subvolumegroup resize' command to succeed")
1486
1487 # verify the quota
1488 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1489 self.assertEqual(size, nsize)
1490
1491 # remove subvolume and group
1492 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1493 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1494
1495 # verify trash dir is clean
1496 self._wait_for_trash_empty()
1497
1498 def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self):
1499 """
1500 That a subvolume group cannot be resized to a size smaller than the current used size
1501 when --no_shrink is given, and that the quota remains unchanged.
1502 """
1503
1504 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
1505 # create group with 20MB quota
1506 group = self._generate_random_group_name()
1507 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1508 "--size", str(osize), "--mode=777")
1509
1510 # make sure it exists
1511 grouppath = self._get_subvolume_group_path(self.volname, group)
1512 self.assertNotEqual(grouppath, None)
1513
1514 # create subvolume under the group
1515 subvolname = self._generate_random_subvolume_name()
1516 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1517 "--group_name", group, "--mode=777")
1518
1519 # make sure it exists
1520 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1521 self.assertNotEqual(subvolpath, None)
1522
1523 # create one file of 10MB
1524 file_size=self.DEFAULT_FILE_SIZE*10
1525 number_of_files=1
1526 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
1527 number_of_files,
1528 file_size))
1529 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
1530 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
1531
1532 usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes"))
1533
1534 # shrink the subvolume group
1535 nsize = usedsize // 2
1536 try:
1537 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink")
1538 except CommandFailedError as ce:
1539 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used")
1540 else:
1541 self.fail("expected the 'fs subvolumegroup resize' command to fail")
1542
1543 # verify the quota did not change
1544 size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
1545 self.assertEqual(size, osize)
1546
1547 # remove subvolume and group
1548 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1549 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1550
1551 # verify trash dir is clean
1552 self._wait_for_trash_empty()
1553
1554 def test_subvolume_group_resize_expand_on_full_subvolume(self):
1555 """
1556 That a subvolume group can be expanded after it is full, and that future writes succeed.
1557 """
1558
1559 osize = self.DEFAULT_FILE_SIZE*1024*1024*100
1560 # create group with 100MB quota
1561 group = self._generate_random_group_name()
1562 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1563 "--size", str(osize), "--mode=777")
1564
1565 # make sure it exists
1566 grouppath = self._get_subvolume_group_path(self.volname, group)
1567 self.assertNotEqual(grouppath, None)
1568
1569 # create subvolume under the group
1570 subvolname = self._generate_random_subvolume_name()
1571 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1572 "--group_name", group, "--mode=777")
1573
1574 # make sure it exists
1575 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1576 self.assertNotEqual(subvolpath, None)
1577
1578 # create 99 files of 1MB
1579 self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)
1580
1581 try:
1582 # write two 1MB files to exceed the quota
1583 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
1584 # allow time for the quota to be enforced
1585 time.sleep(20)
1586 # create 500 files of 1MB
1587 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1588 except CommandFailedError:
1589 # writes failed, so expand the subvolumegroup further and retry writing the files
1590 nsize = osize*7
1591 self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
1592 try:
1593 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1594 except CommandFailedError:
1595 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1596 "to succeed".format(subvolname))
1597 else:
1598 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1599 "to fail".format(subvolname))
1600
1601 # remove subvolume and group
1602 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1603 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1604
1605 # verify trash dir is clean
1606 self._wait_for_trash_empty()
1607
1608 def test_subvolume_group_resize_infinite_size(self):
1609 """
1610 That a subvolume group can be resized to an infinite size by unsetting its quota.
1611 """
1612
1613 osize = self.DEFAULT_FILE_SIZE*1024*1024
1614 # create group
1615 group = self._generate_random_group_name()
1616 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1617 "--size", str(osize))
1618
1619 # make sure it exists
1620 grouppath = self._get_subvolume_group_path(self.volname, group)
1621 self.assertNotEqual(grouppath, None)
1622
1623 # resize inf
1624 self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
1625
1626 # verify that the quota is None
1627 size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
1628 self.assertEqual(size, None)
1629
1630 # remove subvolume group
1631 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1632
1633 def test_subvolume_group_resize_infinite_size_future_writes(self):
1634 """
1635 That a subvolume group can be resized to an infinite size, and that future writes succeed.
1636 """
1637
1638 osize = self.DEFAULT_FILE_SIZE*1024*1024*5
1639 # create group with 5MB quota
1640 group = self._generate_random_group_name()
1641 self._fs_cmd("subvolumegroup", "create", self.volname, group,
1642 "--size", str(osize), "--mode=777")
1643
1644 # make sure it exists
1645 grouppath = self._get_subvolume_group_path(self.volname, group)
1646 self.assertNotEqual(grouppath, None)
1647
1648 # create subvolume under the group
1649 subvolname = self._generate_random_subvolume_name()
1650 self._fs_cmd("subvolume", "create", self.volname, subvolname,
1651 "--group_name", group, "--mode=777")
1652
1653 # make sure it exists
1654 subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
1655 self.assertNotEqual(subvolpath, None)
1656
1657 # create 4 files of 1MB
1658 self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4)
1659
1660 try:
1661 # write two 1MB files to exceed the quota
1662 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
1663 # allow time for the quota to be enforced
1664 time.sleep(20)
1665 # create 500 files of 1MB
1666 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1667 except CommandFailedError:
1668 # writes failed, so resize the subvolumegroup to 'inf' and retry
1669 # writing the files
1670 self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
1671 try:
1672 self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
1673 except CommandFailedError:
1674 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1675 "to succeed".format(subvolname))
1676 else:
1677 self.fail("expected filling subvolume {0} with 500 files of size 1MB "
1678 "to fail".format(subvolname))
1679
1680
1681 # verify that the quota is None
1682 size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
1683 self.assertEqual(size, None)
1684
1685 # remove subvolume and group
1686 self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
1687 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1688
1689 # verify trash dir is clean
1690 self._wait_for_trash_empty()
1691
1692 def test_subvolume_group_ls(self):
1693 # tests the 'fs subvolumegroup ls' command
1694
1695 subvolumegroups = []
1696
1697 # create subvolumegroups
1698 subvolumegroups = self._generate_random_group_name(3)
1699 for groupname in subvolumegroups:
1700 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1701
1702 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1703 if len(subvolumegroupls) == 0:
1704 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
1705 else:
1706 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1707 if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
1708 raise RuntimeError("Error creating or listing subvolume groups")
1709
1710 def test_subvolume_group_ls_filter(self):
1711 # tests that the 'fs subvolumegroup ls' command filters out the '_deleting' directory
1712
1713 subvolumegroups = []
1714
1715 # create subvolumegroups
1716 subvolumegroups = self._generate_random_group_name(3)
1717 for groupname in subvolumegroups:
1718 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1719
1720 # create subvolume and remove. This creates '_deleting' directory.
1721 subvolume = self._generate_random_subvolume_name()
1722 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1723 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1724
1725 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1726 subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
1727 if "_deleting" in subvolgroupnames:
1728 self.fail("Listing subvolume groups listed '_deleting' directory")
1729
1730 def test_subvolume_group_ls_filter_internal_directories(self):
1731 # tests that the 'fs subvolumegroup ls' command filters out internal directories,
1732 # e.g. '_deleting', '_nogroup', '_index', '_legacy'
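# each internal directory is created as a side effect of a volumes operation:
# '_nogroup' by an ungrouped subvolume create, '_index' by a snapshot clone,
# and '_deleting' by a subvolume rm (see the inline comments below)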
1733
1734 subvolumegroups = self._generate_random_group_name(3)
1735 subvolume = self._generate_random_subvolume_name()
1736 snapshot = self._generate_random_snapshot_name()
1737 clone = self._generate_random_clone_name()
1738
1739 # create subvolumegroups
1740 for groupname in subvolumegroups:
1741 self._fs_cmd("subvolumegroup", "create", self.volname, groupname)
1742
1743 # create subvolume which will create '_nogroup' directory
1744 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1745
1746 # create snapshot
1747 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
1748
1749 # clone snapshot which will create '_index' directory
1750 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
1751
1752 # wait for clone to complete
1753 self._wait_for_clone_to_complete(clone)
1754
1755 # remove snapshot
1756 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
1757
1758 # remove subvolume which will create '_deleting' directory
1759 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1760
1761 # list subvolumegroups
1762 ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1763 self.assertEqual(len(ret), len(subvolumegroups))
1764
1765 ret_list = [subvolumegroup['name'] for subvolumegroup in ret]
1766 self.assertEqual(len(ret_list), len(subvolumegroups))
1767
1768 self.assertTrue(all(elem in subvolumegroups for elem in ret_list))
1769
1770 # cleanup
1771 self._fs_cmd("subvolume", "rm", self.volname, clone)
1772 for groupname in subvolumegroups:
1773 self._fs_cmd("subvolumegroup", "rm", self.volname, groupname)
1774
1775 def test_subvolume_group_ls_for_nonexistent_volume(self):
1776 # tests the 'fs subvolumegroup ls' command when /volumes doesn't exist
1777 # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created
1778
1779 # list subvolume groups
1780 subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
1781 if len(subvolumegroupls) > 0:
1782 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
1783
1784 def test_subvolumegroup_pin_distributed(self):
1785 self.fs.set_max_mds(2)
1786 status = self.fs.wait_for_daemons()
1787 self.config_set('mds', 'mds_export_ephemeral_distributed', True)
1788
1789 group = "pinme"
1790 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1791 self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
1792 subvolumes = self._generate_random_subvolume_name(50)
1793 for subvolume in subvolumes:
1794 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1795 self._wait_distributed_subtrees(2 * 2, status=status, rank="all")
1796
1797 # remove subvolumes
1798 for subvolume in subvolumes:
1799 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1800
1801 # verify trash dir is clean
1802 self._wait_for_trash_empty()
1803
1804 def test_subvolume_group_rm_force(self):
1805 # test removing non-existing subvolume group with --force
1806 group = self._generate_random_group_name()
1807 try:
1808 self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
1809 except CommandFailedError:
1810 raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
1811
1812 def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self):
1813 """Test the presence of any subvolumegroup when only subvolumegroup is present"""
1814
1815 group = self._generate_random_group_name()
1816 # create subvolumegroup
1817 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1818 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1819 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1820 # delete subvolumegroup
1821 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1822 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1823 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1824
1825 def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self):
1826 """Test the presence of any subvolumegroup when no subvolumegroup is present"""
1827
1828 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1829 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1830
1831 def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
1832 """Test the presence of any subvolume when subvolumegroup
1833 and subvolume both are present"""
1834
1835 group = self._generate_random_group_name()
1836 subvolume = self._generate_random_subvolume_name(2)
1837 # create subvolumegroup
1838 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1839 # create subvolume in group
1840 self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
1841 # create subvolume
1842 self._fs_cmd("subvolume", "create", self.volname, subvolume[1])
1843 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1844 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1845 # delete subvolume in group
1846 self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
1847 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1848 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1849 # delete subvolume
1850 self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
1851 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1852 self.assertEqual(ret.strip('\n'), "subvolumegroup exists")
1853 # delete subvolumegroup
1854 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1855 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1856 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1857
1858 def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self):
1859 """Test the presence of any subvolume when subvolume is present
1860 but no subvolumegroup is present"""
1861
1862 subvolume = self._generate_random_subvolume_name()
1863 # create subvolume
1864 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1865 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1866 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1867 # delete subvolume
1868 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1869 ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
1870 self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1871
1872
1873 class TestSubvolumes(TestVolumesHelper):
1874 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
1875 def test_async_subvolume_rm(self):
1876 subvolumes = self._generate_random_subvolume_name(100)
1877
1878 # create subvolumes
1879 for subvolume in subvolumes:
1880 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
1881 self._do_subvolume_io(subvolume, number_of_files=10)
1882
1883 self.mount_a.umount_wait()
1884
1885 # remove subvolumes
1886 for subvolume in subvolumes:
1887 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1888
1889 self.mount_a.mount_wait()
1890
1891 # verify trash dir is clean
1892 self._wait_for_trash_empty(timeout=300)
1893
1894 def test_default_uid_gid_subvolume(self):
1895 subvolume = self._generate_random_subvolume_name()
1896 expected_uid = 0
1897 expected_gid = 0
1898
1899 # create subvolume
1900 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1901 subvol_path = self._get_subvolume_path(self.volname, subvolume)
1902
1903 # check subvolume's uid and gid
1904 stat = self.mount_a.stat(subvol_path)
1905 self.assertEqual(stat['st_uid'], expected_uid)
1906 self.assertEqual(stat['st_gid'], expected_gid)
1907
1908 # remove subvolume
1909 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1910
1911 # verify trash dir is clean
1912 self._wait_for_trash_empty()
1913
1914 def test_nonexistent_subvolume_rm(self):
1915 # remove non-existing subvolume
1916 subvolume = "non_existent_subvolume"
1917
1918 # try removing the subvolume
1919 try:
1920 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1921 except CommandFailedError as ce:
1922 if ce.exitstatus != errno.ENOENT:
1923 raise
1924 else:
1925 raise RuntimeError("expected the 'fs subvolume rm' command to fail")
1926
1927 def test_subvolume_create_and_rm(self):
1928 # create subvolume
1929 subvolume = self._generate_random_subvolume_name()
1930 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1931
1932 # make sure it exists
1933 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1934 self.assertNotEqual(subvolpath, None)
1935
1936 # remove subvolume
1937 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1938 # make sure it's gone
1939 try:
1940 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
1941 except CommandFailedError as ce:
1942 if ce.exitstatus != errno.ENOENT:
1943 raise
1944 else:
1945 raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")
1946
1947 # verify trash dir is clean
1948 self._wait_for_trash_empty()
1949
1950 def test_subvolume_create_and_rm_in_group(self):
1951 subvolume = self._generate_random_subvolume_name()
1952 group = self._generate_random_group_name()
1953
1954 # create group
1955 self._fs_cmd("subvolumegroup", "create", self.volname, group)
1956
1957 # create subvolume in group
1958 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
1959
1960 # remove subvolume
1961 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
1962
1963 # verify trash dir is clean
1964 self._wait_for_trash_empty()
1965
1966 # remove group
1967 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
1968
1969 def test_subvolume_create_idempotence(self):
1970 # create subvolume
1971 subvolume = self._generate_random_subvolume_name()
1972 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1973
1974 # try creating w/ same subvolume name -- should be idempotent
1975 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1976
1977 # remove subvolume
1978 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1979
1980 # verify trash dir is clean
1981 self._wait_for_trash_empty()
1982
1983 def test_subvolume_create_idempotence_resize(self):
1984 # create subvolume
1985 subvolume = self._generate_random_subvolume_name()
1986 self._fs_cmd("subvolume", "create", self.volname, subvolume)
1987
1988 # try creating w/ same subvolume name with size -- should set quota
1989 self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")
1990
1991 # get subvolume metadata
1992 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
1993 self.assertEqual(subvol_info["bytes_quota"], 1000000000)
1994
1995 # remove subvolume
1996 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
1997
1998 # verify trash dir is clean
1999 self._wait_for_trash_empty()
2000
2001 def test_subvolume_create_idempotence_mode(self):
2002 # default mode
2003 default_mode = "755"
2004
2005 # create subvolume
2006 subvolume = self._generate_random_subvolume_name()
2007 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2008
2009 subvol_path = self._get_subvolume_path(self.volname, subvolume)
2010
2011 actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
2012 self.assertEqual(actual_mode_1, default_mode)
2013
2014 # try creating w/ same subvolume name with --mode 777
2015 new_mode = "777"
2016 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)
2017
2018 actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
2019 self.assertEqual(actual_mode_2, new_mode)
2020
2021 # remove subvolume
2022 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2023
2024 # verify trash dir is clean
2025 self._wait_for_trash_empty()
2026
2027 def test_subvolume_create_idempotence_without_passing_mode(self):
2028 # create subvolume
2029 desired_mode = "777"
2030 subvolume = self._generate_random_subvolume_name()
2031 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)
2032
2033 subvol_path = self._get_subvolume_path(self.volname, subvolume)
2034
2035 actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
2036 self.assertEqual(actual_mode_1, desired_mode)
2037
2038 # default mode
2039 default_mode = "755"
2040
2041 # try creating w/ same subvolume name without passing --mode argument
2042 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2043
2044 actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
2045 self.assertEqual(actual_mode_2, default_mode)
2046
2047 # remove subvolume
2048 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2049
2050 # verify trash dir is clean
2051 self._wait_for_trash_empty()
2052
2053 def test_subvolume_create_isolated_namespace(self):
2054 """
2055 Create a subvolume in a separate RADOS namespace
2056 """
2057
2058 # create subvolume
2059 subvolume = self._generate_random_subvolume_name()
2060 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")
2061
2062 # get subvolume metadata
2063 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2064 self.assertNotEqual(len(subvol_info), 0)
2065 self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)
2066
2067 # remove subvolumes
2068 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2069
2070 # verify trash dir is clean
2071 self._wait_for_trash_empty()
2072
2073 def test_subvolume_create_with_auto_cleanup_on_fail(self):
2074 subvolume = self._generate_random_subvolume_name()
2075 data_pool = "invalid_pool"
2076 # create subvolume with invalid data pool layout fails
2077 with self.assertRaises(CommandFailedError):
2078 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
2079
2080 # check whether subvol path is cleaned up
2081 try:
2082 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2083 except CommandFailedError as ce:
2084 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
2085 else:
2086 self.fail("expected the 'fs subvolume getpath' command to fail")
2087
2088 # verify trash dir is clean
2089 self._wait_for_trash_empty()
2090
2091 def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
2092 subvol1, subvol2 = self._generate_random_subvolume_name(2)
2093 group = self._generate_random_group_name()
2094
2095 # create group. this also helps set default pool layout for subvolumes
2096 # created within the group.
2097 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2098
2099 # create subvolume in group.
2100 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
2101 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
2102
2103 default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
2104 new_pool = "new_pool"
2105 self.assertNotEqual(default_pool, new_pool)
2106
2107 # add data pool
2108 newid = self.fs.add_data_pool(new_pool)
2109
2110 # create subvolume specifying the new data pool as its pool layout
2111 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
2112 "--pool_layout", new_pool)
2113 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
2114
2115 desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
2116 try:
2117 self.assertEqual(desired_pool, new_pool)
2118 except AssertionError:
2119 self.assertEqual(int(desired_pool), newid) # old kernel returns id
2120
2121 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
2122 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
2123 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2124
2125 # verify trash dir is clean
2126 self._wait_for_trash_empty()
2127
2128 def test_subvolume_create_with_desired_mode(self):
2129 subvol1 = self._generate_random_subvolume_name()
2130
2131 # default mode
2132 default_mode = "755"
2133 # desired mode
2134 desired_mode = "777"
2135
2136 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")
2137
2138 subvol1_path = self._get_subvolume_path(self.volname, subvol1)
2139
2140 # check subvolumegroup's mode
2141 subvol_par_path = os.path.dirname(subvol1_path)
2142 group_path = os.path.dirname(subvol_par_path)
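# getpath returns a path of the form /volumes/<group>/<subvol>/<uuid>, so the
# first dirname() yields the subvolume base directory and the second yields
# the group directory ('_nogroup' here, as no group was specified)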
2143 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
2144 self.assertEqual(actual_mode1, default_mode)
2145 # check /volumes mode
2146 volumes_path = os.path.dirname(group_path)
2147 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
2148 self.assertEqual(actual_mode2, default_mode)
2149 # check subvolume's mode
2150 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
2151 self.assertEqual(actual_mode3, desired_mode)
2152
2153 self._fs_cmd("subvolume", "rm", self.volname, subvol1)
2154
2155 # verify trash dir is clean
2156 self._wait_for_trash_empty()
2157
2158 def test_subvolume_create_with_desired_mode_in_group(self):
2159 subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
2160
2161 group = self._generate_random_group_name()
2162 # default mode
2163 expected_mode1 = "755"
2164 # desired mode
2165 expected_mode2 = "777"
2166
2167 # create group
2168 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2169
2170 # create subvolume in group
2171 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
2172 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
2173 # check whether mode 0777 also works
2174 self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")
2175
2176 subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
2177 subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
2178 subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)
2179
2180 # check subvolume's mode
2181 actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
2182 actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
2183 actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
2184 self.assertEqual(actual_mode1, expected_mode1)
2185 self.assertEqual(actual_mode2, expected_mode2)
2186 self.assertEqual(actual_mode3, expected_mode2)
2187
2188 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
2189 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
2190 self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
2191 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2192
2193 # verify trash dir is clean
2194 self._wait_for_trash_empty()
2195
2196 def test_subvolume_create_with_desired_uid_gid(self):
2197 """
2198 That the subvolume can be created with the desired uid and gid and its uid and gid matches the
2199 expected values.
2200 """
2201 uid = 1000
2202 gid = 1000
2203
2204 # create subvolume
2205 subvolname = self._generate_random_subvolume_name()
2206 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))
2207
2208 # make sure it exists
2209 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2210 self.assertNotEqual(subvolpath, None)
2211
2212 # verify the uid and gid
2213 suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
2214 sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
2215 self.assertEqual(uid, suid)
2216 self.assertEqual(gid, sgid)
2217
2218 # remove subvolume
2219 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2220
2221 # verify trash dir is clean
2222 self._wait_for_trash_empty()
2223
2224 def test_subvolume_create_with_invalid_data_pool_layout(self):
2225 subvolume = self._generate_random_subvolume_name()
2226 data_pool = "invalid_pool"
2227 # create subvolume with invalid data pool layout
2228 try:
2229 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
2230 except CommandFailedError as ce:
2231 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
2232 else:
2233 self.fail("expected the 'fs subvolume create' command to fail")
2234
2235 # verify trash dir is clean
2236 self._wait_for_trash_empty()
2237
2238 def test_subvolume_create_with_invalid_size(self):
2239 # create subvolume with an invalid size -1
2240 subvolume = self._generate_random_subvolume_name()
2241 try:
2242 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
2243 except CommandFailedError as ce:
2244 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
2245 else:
2246 self.fail("expected the 'fs subvolume create' command to fail")
2247
2248 # verify trash dir is clean
2249 self._wait_for_trash_empty()
2250
2251 def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
2252 """
2253 That 'subvolume create' and 'subvolume ls' should throw a
2254 permission denied error if the option --group_name=_nogroup is provided.
2255 """
2256
2257 subvolname = self._generate_random_subvolume_name()
2258
2259 # try to create subvolume providing --group_name=_nogroup option
2260 try:
2261 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
2262 except CommandFailedError as ce:
2263 self.assertEqual(ce.exitstatus, errno.EPERM)
2264 else:
2265 self.fail("expected the 'fs subvolume create' command to fail")
2266
2267 # create subvolume
2268 self._fs_cmd("subvolume", "create", self.volname, subvolname)
2269
2270 # try to list subvolumes providing --group_name=_nogroup option
2271 try:
2272 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
2273 except CommandFailedError as ce:
2274 self.assertEqual(ce.exitstatus, errno.EPERM)
2275 else:
2276 self.fail("expected the 'fs subvolume ls' command to fail")
2277
2278 # list subvolumes
2279 self._fs_cmd("subvolume", "ls", self.volname)
2280
2281 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2282
2283 # verify trash dir is clean.
2284 self._wait_for_trash_empty()
2285
2286 def test_subvolume_expand(self):
2287 """
2288 That a subvolume can be expanded in size and its quota matches the expected size.
2289 """
2290
2291 # create subvolume
2292 subvolname = self._generate_random_subvolume_name()
2293 osize = self.DEFAULT_FILE_SIZE*1024*1024
2294 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
2295
2296 # make sure it exists
2297 subvolpath = self._get_subvolume_path(self.volname, subvolname)
2298 self.assertNotEqual(subvolpath, None)
2299
2300 # expand the subvolume
2301 nsize = osize*2
2302 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
2303
2304 # verify the quota
2305 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
2306 self.assertEqual(size, nsize)
2307
2308 # remove subvolume
2309 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
2310
2311 # verify trash dir is clean
2312 self._wait_for_trash_empty()
2313
2314 def test_subvolume_info(self):
2315 # tests the 'fs subvolume info' command
2316
2317 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
2318 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
2319 "type", "uid", "features", "state"]
2320
2321 # create subvolume
2322 subvolume = self._generate_random_subvolume_name()
2323 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2324
2325 # get subvolume metadata
2326 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2327 for md in subvol_md:
2328 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
2329
2330 self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
2331 self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
2332 self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
2333 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
2334
2335 self.assertEqual(len(subvol_info["features"]), 3,
2336 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
2337 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
2338 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
2339
2340 nsize = self.DEFAULT_FILE_SIZE*1024*1024
2341 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
2342
2343 # get subvolume metadata after quota set
2344 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
2345 for md in subvol_md:
2346 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
2347
2348 self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
2349 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
2350 self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
2351 self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
2352
2353 self.assertEqual(len(subvol_info["features"]), 3,
2354 msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
2355 for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
2356 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
2357
2358 # remove subvolumes
2359 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2360
2361 # verify trash dir is clean
2362 self._wait_for_trash_empty()
2363
2364 def test_subvolume_ls(self):
2365 # tests the 'fs subvolume ls' command
2366
2367 subvolumes = []
2368
2369 # create subvolumes
2370 subvolumes = self._generate_random_subvolume_name(3)
2371 for subvolume in subvolumes:
2372 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2373
2374 # list subvolumes
2375 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2376 if len(subvolumels) == 0:
2377 self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
2378 else:
2379 subvolnames = [subvolume['name'] for subvolume in subvolumels]
2380 if collections.Counter(subvolnames) != collections.Counter(subvolumes):
2381 self.fail("Error creating or listing subvolumes")
2382
2383 # remove subvolume
2384 for subvolume in subvolumes:
2385 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2386
2387 # verify trash dir is clean
2388 self._wait_for_trash_empty()
2389
2390 def test_subvolume_ls_with_groupname_as_internal_directory(self):
2391 # tests the 'fs subvolume ls' command when the group name given is an internal directory,
2392 # e.g. '_nogroup', '_legacy', '_deleting', '_index'.
2393 # 'fs subvolume ls' is expected to fail with errno EINVAL for '_legacy', '_deleting', '_index'
2394 # and with errno EPERM for '_nogroup'
2395
2396 # try to list subvolumes providing --group_name=_nogroup option
2397 try:
2398 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
2399 except CommandFailedError as ce:
2400 self.assertEqual(ce.exitstatus, errno.EPERM)
2401 else:
2402 self.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup")
2403
2404 # try to list subvolumes providing --group_name=_legacy option
2405 try:
2406 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_legacy")
2407 except CommandFailedError as ce:
2408 self.assertEqual(ce.exitstatus, errno.EINVAL)
2409 else:
2410 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy")
2411
2412 # try to list subvolumes providing --group_name=_deleting option
2413 try:
2414 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_deleting")
2415 except CommandFailedError as ce:
2416 self.assertEqual(ce.exitstatus, errno.EINVAL)
2417 else:
2418 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting")
2419
2420 # try to list subvolumes providing --group_name=_index option
2421 try:
2422 self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_index")
2423 except CommandFailedError as ce:
2424 self.assertEqual(ce.exitstatus, errno.EINVAL)
2425 else:
2426 self.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _index")
2427
2428 def test_subvolume_ls_for_notexistent_default_group(self):
2429 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
2430 # prerequisite: we expect that the volume is created and the default group _nogroup is
2431 # NOT created (i.e. a subvolume without group is not created)
2432
2433 # list subvolumes
2434 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
2435 if len(subvolumels) > 0:
2436 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
2437
2438 def test_subvolume_marked(self):
2439 """
2440 ensure a subvolume is marked with the ceph.dir.subvolume xattr
2441 """
2442 subvolume = self._generate_random_subvolume_name()
2443
2444 # create subvolume
2445 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2446
2447 # getpath
2448 subvolpath = self._get_subvolume_path(self.volname, subvolume)
2449
2450 # a subdirectory of a subvolume cannot be moved outside the subvolume once marked with
2451 # the ceph.dir.subvolume xattr, so test this by attempting to rename the subvol path
2452 # (incarnation) outside the subvolume
2453 dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location')
2454 srcpath = os.path.join(self.mount_a.mountpoint, subvolpath)
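# run the rename on the client mount; crossing the subvolume boundary is
# expected to fail with EXDEV ("invalid cross-device link")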
2455 rename_script = dedent("""
2456 import os
2457 import errno
2458 try:
2459 os.rename("{src}", "{dst}")
2460 except OSError as e:
2461 if e.errno != errno.EXDEV:
2462 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
2463 else:
2464 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
2465 """)
2466 self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)
2467
2468 # remove subvolume
2469 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2470
2471 # verify trash dir is clean
2472 self._wait_for_trash_empty()
2473
2474 def test_subvolume_pin_export(self):
2475 self.fs.set_max_mds(2)
2476 status = self.fs.wait_for_daemons()
2477
2478 subvolume = self._generate_random_subvolume_name()
2479 self._fs_cmd("subvolume", "create", self.volname, subvolume)
2480 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
2481 path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
2482 path = os.path.dirname(path) # get subvolume path
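# 'subvolume pin ... export 1' pins the subvolume base directory (the parent
# of the uuid directory) to rank 1, so its subtree should migrate there --
# which _wait_subtrees asserts below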
2483
2484 self._get_subtrees(status=status, rank=1)
2485 self._wait_subtrees([(path, 1)], status=status)
2486
2487 # remove subvolume
2488 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
2489
2490 # verify trash dir is clean
2491 self._wait_for_trash_empty()
2492
2493 ### authorize operations
2494
2495 def test_authorize_deauthorize_legacy_subvolume(self):
2496 subvolume = self._generate_random_subvolume_name()
2497 group = self._generate_random_group_name()
2498 authid = "alice"
2499
2500 guest_mount = self.mount_b
2501 guest_mount.umount_wait()
2502
2503 # emulate an old-fashioned subvolume in a custom group
2504 createpath = os.path.join(".", "volumes", group, subvolume)
2505 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
2506
2507 # add required xattrs to subvolume
2508 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
2509 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
2510
2511 mount_path = os.path.join("/", "volumes", group, subvolume)
2512
2513 # authorize guest authID read-write access to subvolume
2514 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2515 "--group_name", group, "--tenant_id", "tenant_id")
2516
2517 # guest authID should exist
2518 existing_ids = [a['entity'] for a in self.auth_list()]
2519 self.assertIn("client.{0}".format(authid), existing_ids)
2520
2521 # configure credentials for guest client
2522 self._configure_guest_auth(guest_mount, authid, key)
2523
2524 # mount the subvolume, and write to it
2525 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2526 guest_mount.write_n_mb("data.bin", 1)
2527
2528 # authorize guest authID read access to subvolume
2529 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2530 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
2531
2532 # the guest client sees the change to read-only access only after
2533 # a remount of the subvolume.
2534 guest_mount.umount_wait()
2535 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2536
2537 # read existing content of the subvolume
2538 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
2539 # cannot write into read-only subvolume
2540 with self.assertRaises(CommandFailedError):
2541 guest_mount.write_n_mb("rogue.bin", 1)
2542
2543 # cleanup
2544 guest_mount.umount_wait()
2545 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
2546 "--group_name", group)
2547 # guest authID should no longer exist
2548 existing_ids = [a['entity'] for a in self.auth_list()]
2549 self.assertNotIn("client.{0}".format(authid), existing_ids)
2550 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2551 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2552
2553 def test_authorize_deauthorize_subvolume(self):
2554 subvolume = self._generate_random_subvolume_name()
2555 group = self._generate_random_group_name()
2556 authid = "alice"
2557
2558 guest_mount = self.mount_b
2559 guest_mount.umount_wait()
2560
2561 # create group
2562 self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")
2563
2564 # create subvolume in group
2565 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2566 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
2567 "--group_name", group).rstrip()
2568
2569 # authorize guest authID read-write access to subvolume
2570 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2571 "--group_name", group, "--tenant_id", "tenant_id")
2572
2573 # guest authID should exist
2574 existing_ids = [a['entity'] for a in self.auth_list()]
2575 self.assertIn("client.{0}".format(authid), existing_ids)
2576
2577 # configure credentials for guest client
2578 self._configure_guest_auth(guest_mount, authid, key)
2579
2580 # mount the subvolume, and write to it
2581 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2582 guest_mount.write_n_mb("data.bin", 1)
2583
2584 # authorize guest authID read access to subvolume
2585 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid,
2586 "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r")
2587
2588 # the guest client sees the change to read-only access only after
2589 # a remount of the subvolume.
2590 guest_mount.umount_wait()
2591 guest_mount.mount_wait(cephfs_mntpt=mount_path)
2592
2593 # read existing content of the subvolume
2594 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
2595 # cannot write into read-only subvolume
2596 with self.assertRaises(CommandFailedError):
2597 guest_mount.write_n_mb("rogue.bin", 1)
2598
2599 # cleanup
2600 guest_mount.umount_wait()
2601 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid,
2602 "--group_name", group)
2603 # guest authID should no longer exist
2604 existing_ids = [a['entity'] for a in self.auth_list()]
2605 self.assertNotIn("client.{0}".format(authid), existing_ids)
2606 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2607 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2608
2609 def test_multitenant_subvolumes(self):
2610 """
2611 That subvolume access can be restricted to a tenant.
2612
2613 That metadata used to enforce tenant isolation of
2614 subvolumes is stored as a two-way mapping between auth
2615 IDs and subvolumes that they're authorized to access.
2616 """
2617 subvolume = self._generate_random_subvolume_name()
2618 group = self._generate_random_group_name()
2619
2620 guest_mount = self.mount_b
2621
2622 # Guest clients belonging to different tenants, but using the same
2623 # auth ID.
2624 auth_id = "alice"
2625 guestclient_1 = {
2626 "auth_id": auth_id,
2627 "tenant_id": "tenant1",
2628 }
2629 guestclient_2 = {
2630 "auth_id": auth_id,
2631 "tenant_id": "tenant2",
2632 }
2633
2634 # create group
2635 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2636
2637 # create subvolume in group
2638 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2639
2640 # Check that subvolume metadata file is created on subvolume creation.
2641 subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume)
2642 self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes"))
2643
2644 # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
2645 # 'tenant1', with 'rw' access to the volume.
2646 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2647 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2648
2649 # Check that auth metadata file for auth ID 'alice', is
2650 # created on authorizing 'alice' access to the subvolume.
2651 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2652 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2653
2654 # Verify that the auth metadata file stores the tenant ID that the
2655 # auth ID belongs to, the auth ID's authorized access levels
2656 # for different subvolumes, versioning details, etc.
2657 expected_auth_metadata = {
2658 "version": 5,
2659 "compat_version": 6,
2660 "dirty": False,
2661 "tenant_id": "tenant1",
2662 "subvolumes": {
2663 "{0}/{1}".format(group,subvolume): {
2664 "dirty": False,
2665 "access_level": "rw"
2666 }
2667 }
2668 }
2669
2670 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
2671 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
2672 del expected_auth_metadata["version"]
2673 del auth_metadata["version"]
2674 self.assertEqual(expected_auth_metadata, auth_metadata)
2675
2676 # Verify that the subvolume metadata file stores info about auth IDs
2677 # and their access levels to the subvolume, versioning details, etc.
2678 expected_subvol_metadata = {
2679 "version": 1,
2680 "compat_version": 1,
2681 "auths": {
2682 "alice": {
2683 "dirty": False,
2684 "access_level": "rw"
2685 }
2686 }
2687 }
2688 subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename)))
2689
2690 self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"])
2691 del expected_subvol_metadata["version"]
2692 del subvol_metadata["version"]
2693 self.assertEqual(expected_subvol_metadata, subvol_metadata)
2694
2695 # Cannot authorize 'guestclient_2' to access the volume.
2696 # It uses auth ID 'alice', which has already been used by a
2697 # 'guestclient_1' belonging to another tenant for accessing
2698 # the volume.
2699
2700 try:
2701 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"],
2702 "--group_name", group, "--tenant_id", guestclient_2["tenant_id"])
2703 except CommandFailedError as ce:
2704 self.assertEqual(ce.exitstatus, errno.EPERM,
2705 "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
2706 else:
2707 self.fail("expected the 'fs subvolume authorize' command to fail")
2708
2709 # Check that auth metadata file is cleaned up on removing
2710 # auth ID's only access to a volume.
2711
2712 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
2713 "--group_name", group)
2714 self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes"))
2715
2716 # Check that subvolume metadata file is cleaned up on subvolume deletion.
2717 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2718 self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes"))
2719
2720 # clean up
2721 guest_mount.umount_wait()
2722 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2723
2724 def test_subvolume_authorized_list(self):
2725 subvolume = self._generate_random_subvolume_name()
2726 group = self._generate_random_group_name()
2727 authid1 = "alice"
2728 authid2 = "guest1"
2729 authid3 = "guest2"
2730
2731 # create group
2732 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2733
2734 # create subvolume in group
2735 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2736
2737 # authorize alice authID read-write access to subvolume
2738 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1,
2739 "--group_name", group)
2740 # authorize guest1 authID read-write access to subvolume
2741 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2,
2742 "--group_name", group)
2743 # authorize guest2 authID read access to subvolume
2744 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3,
2745 "--group_name", group, "--access_level", "r")
2746
2747 # list authorized-ids of the subvolume
2748 expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
2749 auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group))
2750 self.assertCountEqual(expected_auth_list, auth_list)
2751
2752 # cleanup
2753 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1,
2754 "--group_name", group)
2755 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2,
2756 "--group_name", group)
2757 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3,
2758 "--group_name", group)
2759 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2760 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2761
2762 def test_authorize_auth_id_not_created_by_mgr_volumes(self):
2763 """
2764 If the auth_id already exists and was not created by the mgr plugin,
2765 authorizing that auth_id is not allowed by default.
2766 """
2767
2768 subvolume = self._generate_random_subvolume_name()
2769 group = self._generate_random_group_name()
2770
2771 # Create auth_id
2772 self.fs.mon_manager.raw_cluster_cmd(
2773 "auth", "get-or-create", "client.guest1",
2774 "mds", "allow *",
2775 "osd", "allow rw",
2776 "mon", "allow *"
2777 )
2778
2779 auth_id = "guest1"
2780 guestclient_1 = {
2781 "auth_id": auth_id,
2782 "tenant_id": "tenant1",
2783 }
2784
2785 # create group
2786 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2787
2788 # create subvolume in group
2789 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2790
2791 try:
2792 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2793 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2794 except CommandFailedError as ce:
2795 self.assertEqual(ce.exitstatus, errno.EPERM,
2796 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
2797 else:
2798 self.fail("expected the 'fs subvolume authorize' command to fail")
2799
2800 # clean up
2801 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2802 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2803 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2804
2805 def test_authorize_allow_existing_id_option(self):
2806 """
2807 If the auth_id already exists and was not created by mgr/volumes,
2808 authorizing that auth_id is not allowed by default but is
2809 allowed with the allow_existing_id option.
2810 """
2811
2812 subvolume = self._generate_random_subvolume_name()
2813 group = self._generate_random_group_name()
2814
2815 # Create auth_id
2816 self.fs.mon_manager.raw_cluster_cmd(
2817 "auth", "get-or-create", "client.guest1",
2818 "mds", "allow *",
2819 "osd", "allow rw",
2820 "mon", "allow *"
2821 )
2822
2823 auth_id = "guest1"
2824 guestclient_1 = {
2825 "auth_id": auth_id,
2826 "tenant_id": "tenant1",
2827 }
2828
2829 # create group
2830 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2831
2832 # create subvolume in group
2833 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2834
2835 # By default, 'guestclient_1' cannot be authorized since its auth ID
2836 # already exists and was not created by mgr/volumes; passing
2837 # '--allow-existing-id' allows the authorization to proceed.
2838 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2839 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id")
2840
2841 # clean up
2842 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
2843 "--group_name", group)
2844 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2845 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2846 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2847
2848 def test_deauthorize_auth_id_after_out_of_band_update(self):
2849 """
2850 If the auth_id authorized by mgr/volumes plugin is updated
2851 out of band, the auth_id should not be deleted after a
2852 deauthorize. It should only remove caps associated with it.
2853 """
2854
2855 subvolume = self._generate_random_subvolume_name()
2856 group = self._generate_random_group_name()
2857
2858 auth_id = "guest1"
2859 guestclient_1 = {
2860 "auth_id": auth_id,
2861 "tenant_id": "tenant1",
2862 }
2863
2864 # create group
2865 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2866
2867 # create subvolume in group
2868 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2869
2870 # Authorize 'guestclient_1' to access the subvolume.
2871 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2872 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2873
2874 subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume,
2875 "--group_name", group).rstrip()
2876
2877 # Update caps for guestclient_1 out of band
2878 out = self.fs.mon_manager.raw_cluster_cmd(
2879 "auth", "caps", "client.guest1",
2880 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
2881 "osd", "allow rw pool=cephfs_data",
2882 "mon", "allow r",
2883 "mgr", "allow *"
2884 )
2885
2886 # Deauthorize guestclient_1
2887 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
2888
2889 # Validate the caps of guestclient_1 after deauthorize. The auth ID
2890 # itself should not have been deleted, and the mgr and mds caps that were updated out of band should still be present.
2891 out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
2892
2893 self.assertEqual("client.guest1", out[0]["entity"])
2894 self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
2895 self.assertEqual("allow *", out[0]["caps"]["mgr"])
2896 self.assertNotIn("osd", out[0]["caps"])
2897
2898 # clean up
2899 out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2900 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2901 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2902
2903 def test_recover_auth_metadata_during_authorize(self):
2904 """
2905 That auth metadata manager can recover from partial auth updates using
2906 metadata files, which store auth info and its update status info. This
2907 test validates the recovery during authorize.
2908 """
2909
2910 guest_mount = self.mount_b
2911
2912 subvolume = self._generate_random_subvolume_name()
2913 group = self._generate_random_group_name()
2914
2915 auth_id = "guest1"
2916 guestclient_1 = {
2917 "auth_id": auth_id,
2918 "tenant_id": "tenant1",
2919 }
2920
2921 # create group
2922 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2923
2924 # create subvolume in group
2925 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
2926
2927 # Authorize 'guestclient_1' to access the subvolume.
2928 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2929 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2930
2931 # Check that the auth metadata file for auth ID 'guest1' is
2932 # created on authorizing 'guest1' access to the subvolume.
2933 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2934 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2935 expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2936
2937 # Induce partial auth update state by modifying the auth metadata file,
2938 # and then run authorize again.
2939 guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
2940
2941 # Authorize 'guestclient_1' to access the subvolume.
2942 self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
2943 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2944
2945 auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2946 self.assertEqual(auth_metadata_content, expected_auth_metadata_content)
2947
2948 # clean up
2949 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
2950 guest_mount.umount_wait()
2951 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
2952 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
2953 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
2954
2955 def test_recover_auth_metadata_during_deauthorize(self):
2956 """
2957 That auth metadata manager can recover from partial auth updates using
2958 metadata files, which store auth info and its update status info. This
2959 test validates the recovery during deauthorize.
2960 """
2961
2962 guest_mount = self.mount_b
2963
2964 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
2965 group = self._generate_random_group_name()
2966
2967 guestclient_1 = {
2968 "auth_id": "guest1",
2969 "tenant_id": "tenant1",
2970 }
2971
2972 # create group
2973 self._fs_cmd("subvolumegroup", "create", self.volname, group)
2974
2975 # create subvolumes in group
2976 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
2977 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
2978
2979 # Authorize 'guestclient_1' to access the subvolume1.
2980 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
2981 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2982
2983 # Check that the auth metadata file for auth ID 'guest1' is
2984 # created on authorizing 'guest1' access to the subvolume1.
2985 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
2986 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
2987 expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
2988
2989 # Authorize 'guestclient_1' to access the subvolume2.
2990 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
2991 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
2992
2993 # Induce partial auth update state by modifying the auth metadata file,
2994 # and then run de-authorize.
2995 guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
2996
2997 # Deauthorize 'guestclient_1's access to subvolume2.
2998 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
2999 "--group_name", group)
3000
3001 auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename)))
3002 self.assertEqual(auth_metadata_content, expected_auth_metadata_content)
3003
3004 # clean up
3005 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
3006 guest_mount.umount_wait()
3007 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
3008 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3009 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
3010 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3011
3012 def test_update_old_style_auth_metadata_to_new_during_authorize(self):
3013 """
3014 CephVolumeClient stored subvolume data in the auth metadata file under a
3015 'volumes' key, since there was no subvolume namespace back then. That key
3016 no longer makes sense with mgr/volumes. This test validates the transparent
3017 update of the 'volumes' key to the 'subvolumes' key in the auth metadata file during authorize.
3018 """
3019
3020 guest_mount = self.mount_b
3021
3022 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
3023 group = self._generate_random_group_name()
3024
3025 auth_id = "guest1"
3026 guestclient_1 = {
3027 "auth_id": auth_id,
3028 "tenant_id": "tenant1",
3029 }
3030
3031 # create group
3032 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3033
3034 # create subvolumes in group
3035 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
3036 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
3037
3038 # Authorize 'guestclient_1' to access the subvolume1.
3039 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
3040 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3041
3042 # Check that the auth metadata file for auth ID 'guest1' is
3043 # created on authorizing 'guest1' access to the subvolume1.
3044 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
3045 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
3046
3047 # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
3048 guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
3049
3050 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
3051 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
3052 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3053
3054 expected_auth_metadata = {
3055 "version": 5,
3056 "compat_version": 6,
3057 "dirty": False,
3058 "tenant_id": "tenant1",
3059 "subvolumes": {
3060 "{0}/{1}".format(group,subvolume1): {
3061 "dirty": False,
3062 "access_level": "rw"
3063 },
3064 "{0}/{1}".format(group,subvolume2): {
3065 "dirty": False,
3066 "access_level": "rw"
3067 }
3068 }
3069 }
3070
3071 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
3072
3073 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
3074 del expected_auth_metadata["version"]
3075 del auth_metadata["version"]
3076 self.assertEqual(expected_auth_metadata, auth_metadata)
3077
3078 # clean up
3079 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
3080 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
3081 guest_mount.umount_wait()
3082 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
3083 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3084 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
3085 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3086
3087 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self):
3088 """
3089 CephVolumeClient stored subvolume data in the auth metadata file under a
3090 'volumes' key, since there was no subvolume namespace back then. That key
3091 no longer makes sense with mgr/volumes. This test validates the transparent
3092 update of the 'volumes' key to the 'subvolumes' key in the auth metadata file during deauthorize.
3093 """
3094
3095 guest_mount = self.mount_b
3096
3097 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
3098 group = self._generate_random_group_name()
3099
3100 auth_id = "guest1"
3101 guestclient_1 = {
3102 "auth_id": auth_id,
3103 "tenant_id": "tenant1",
3104 }
3105
3106 # create group
3107 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3108
3109 # create subvolumes in group
3110 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
3111 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group)
3112
3113 # Authorize 'guestclient_1' to access the subvolume1.
3114 self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"],
3115 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3116
3117 # Authorize 'guestclient_1' to access the subvolume2.
3118 self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
3119 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3120
3121 # Check that the auth metadata file for auth ID 'guest1' is created.
3122 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
3123 self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
3124
3125 # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file
3126 guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
3127
3128 # Deauthorize 'guestclient_1's access to subvolume2. This should transparently update 'volumes' to 'subvolumes'
3129 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
3130
3131 expected_auth_metadata = {
3132 "version": 5,
3133 "compat_version": 6,
3134 "dirty": False,
3135 "tenant_id": "tenant1",
3136 "subvolumes": {
3137 "{0}/{1}".format(group,subvolume1): {
3138 "dirty": False,
3139 "access_level": "rw"
3140 }
3141 }
3142 }
3143
3144 auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename)))
3145
3146 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
3147 del expected_auth_metadata["version"]
3148 del auth_metadata["version"]
3149 self.assertEqual(expected_auth_metadata, auth_metadata)
3150
3151 # clean up
3152 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
3153 guest_mount.umount_wait()
3154 self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
3155 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3156 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
3157 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3158
3159 def test_subvolume_evict_client(self):
3160 """
3161 That a subvolume client can be evicted based on the auth ID
3162 """
3163
3164 subvolumes = self._generate_random_subvolume_name(2)
3165 group = self._generate_random_group_name()
3166
3167 # create group
3168 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3169
3170 # mounts[0] and mounts[1] will be used as guests to mount the subvolumes.
3171 for i in range(0, 2):
3172 self.mounts[i].umount_wait()
3173 guest_mounts = (self.mounts[0], self.mounts[1])
3174 auth_id = "guest"
3175 guestclient_1 = {
3176 "auth_id": auth_id,
3177 "tenant_id": "tenant1",
3178 }
3179
3180 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
3181 # subvolumes. Mount the two subvolumes. Write data to the volumes.
3182 for i in range(2):
3183 # Create subvolume.
3184 self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")
3185
3186 # authorize guest authID read-write access to subvolume
3187 key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
3188 "--group_name", group, "--tenant_id", guestclient_1["tenant_id"])
3189
3190 mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i],
3191 "--group_name", group).rstrip()
3192 # configure credentials for guest client
3193 self._configure_guest_auth(guest_mounts[i], auth_id, key)
3194
3195 # mount the subvolume, and write to it
3196 guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
3197 guest_mounts[i].write_n_mb("data.bin", 1)
3198
3199 # Evict the guest client, guest_mounts[0], which is using auth ID
3200 # 'guest' and has mounted one of the two subvolumes.
3201 self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group)
3202
3203 # The evicted guest client, guest_mounts[0], should not be able to
3204 # perform any more metadata ops. It should start failing all operations
3205 # when it sees that its own address is in the blocklist.
3206 try:
3207 guest_mounts[0].write_n_mb("rogue.bin", 1)
3208 except CommandFailedError:
3209 pass
3210 else:
3211 raise RuntimeError("post-eviction write should have failed!")
3212
3213 # The blocklisted guest client can now only be unmounted
3214 guest_mounts[0].umount_wait()
3215
3216 # Guest client guest_mounts[1] uses the same auth ID 'guest' but has
3217 # mounted the other subvolume; it should be able to keep using its
3218 # subvolume unaffected.
3219 guest_mounts[1].write_n_mb("data.bin.1", 1)
3220
3221 # Cleanup.
3222 guest_mounts[1].umount_wait()
3223 for i in range(2):
3224 self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group)
3225 self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group)
3226 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3227
3228 def test_subvolume_pin_random(self):
3229 self.fs.set_max_mds(2)
3230 self.fs.wait_for_daemons()
3231 self.config_set('mds', 'mds_export_ephemeral_random', True)
3232
3233 subvolume = self._generate_random_subvolume_name()
3234 self._fs_cmd("subvolume", "create", self.volname, subvolume)
3235 self._fs_cmd("subvolume", "pin", self.volname, subvolume, "random", ".01")
3236 # no verification
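# Note: "random" pinning sets the ephemeral random export pin on the
# subvolume (with probability .01 here), which only makes descendant
# directories eligible for random pinning across the active MDS ranks,
# hence there is nothing deterministic to verify here.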
3237
3238 # remove subvolume
3239 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3240
3241 # verify trash dir is clean
3242 self._wait_for_trash_empty()
3243
3244 def test_subvolume_resize_fail_invalid_size(self):
3245 """
3246 That a subvolume cannot be resized to an invalid size and that the quota remains unchanged
3247 """
3248
3249 osize = self.DEFAULT_FILE_SIZE*1024*1024
3250 # create subvolume
3251 subvolname = self._generate_random_subvolume_name()
3252 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3253
3254 # make sure it exists
3255 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3256 self.assertNotEqual(subvolpath, None)
3257
3258 # try to resize the subvolume with an invalid size -10
3259 nsize = -10
3260 try:
3261 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3262 except CommandFailedError as ce:
3263 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3264 else:
3265 self.fail("expected the 'fs subvolume resize' command to fail")
3266
3267 # verify the quota did not change
3268 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3269 self.assertEqual(size, osize)
3270
3271 # remove subvolume
3272 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3273
3274 # verify trash dir is clean
3275 self._wait_for_trash_empty()
3276
3277 def test_subvolume_resize_fail_zero_size(self):
3278 """
3279 That a subvolume cannot be resized to a zero size and that the quota remains unchanged
3280 """
3281
3282 osize = self.DEFAULT_FILE_SIZE*1024*1024
3283 # create subvolume
3284 subvolname = self._generate_random_subvolume_name()
3285 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3286
3287 # make sure it exists
3288 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3289 self.assertNotEqual(subvolpath, None)
3290
3291 # try to resize the subvolume with size 0
3292 nsize = 0
3293 try:
3294 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3295 except CommandFailedError as ce:
3296 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3297 else:
3298 self.fail("expected the 'fs subvolume resize' command to fail")
3299
3300 # verify the quota did not change
3301 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3302 self.assertEqual(size, osize)
3303
3304 # remove subvolume
3305 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3306
3307 # verify trash dir is clean
3308 self._wait_for_trash_empty()
3309
3310 def test_subvolume_resize_quota_lt_used_size(self):
3311 """
3312 That a subvolume can be resized to a size smaller than the current used size
3313 and the resulting quota matches the expected size.
3314 """
3315
3316 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
3317 # create subvolume
3318 subvolname = self._generate_random_subvolume_name()
3319 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3320
3321 # make sure it exists
3322 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3323 self.assertNotEqual(subvolpath, None)
3324
3325 # create one file of 10MB
3326 file_size=self.DEFAULT_FILE_SIZE*10
3327 number_of_files=1
3328 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3329 number_of_files,
3330 file_size))
3331 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
3332 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3333
3334 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
3335 susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
3336 if isinstance(self.mount_a, FuseMount):
3337 # kclient dir does not have size==rbytes
3338 self.assertEqual(usedsize, susedsize)
3339
3340 # shrink the subvolume
3341 nsize = usedsize // 2
3342 try:
3343 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3344 except CommandFailedError:
3345 self.fail("expected the 'fs subvolume resize' command to succeed")
3346
3347 # verify the quota
3348 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3349 self.assertEqual(size, nsize)
3350
3351 # remove subvolume
3352 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3353
3354 # verify trash dir is clean
3355 self._wait_for_trash_empty()
3356
3357 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self):
3358 """
3359 That a subvolume cannot be resized to a size smaller than the current used size
3360 when --no_shrink is given and that the quota remains unchanged.
3361 """
3362
3363 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
3364 # create subvolume
3365 subvolname = self._generate_random_subvolume_name()
3366 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3367
3368 # make sure it exists
3369 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3370 self.assertNotEqual(subvolpath, None)
3371
3372 # create one file of 10MB
3373 file_size=self.DEFAULT_FILE_SIZE*10
3374 number_of_files=1
3375 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3376 number_of_files,
3377 file_size))
3378 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
3379 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3380
3381 usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))
3382 susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip())
3383 if isinstance(self.mount_a, FuseMount):
3384 # kclient dir does not have size==rbytes
3385 self.assertEqual(usedsize, susedsize)
3386
3387 # shrink the subvolume
3388 nsize = usedsize // 2
3389 try:
3390 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink")
3391 except CommandFailedError as ce:
3392 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size")
3393 else:
3394 self.fail("expected the 'fs subvolume resize' command to fail")
3395
3396 # verify the quota did not change
3397 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3398 self.assertEqual(size, osize)
3399
3400 # remove subvolume
3401 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3402
3403 # verify trash dir is clean
3404 self._wait_for_trash_empty()
3405
3406 def test_subvolume_resize_expand_on_full_subvolume(self):
3407 """
3408 That a full subvolume can be expanded and that future writes then succeed.
3409 """
3410
3411 osize = self.DEFAULT_FILE_SIZE*1024*1024*10
3412 # create subvolume of quota 10MB and make sure it exists
3413 subvolname = self._generate_random_subvolume_name()
3414 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
3415 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3416 self.assertNotEqual(subvolpath, None)
3417
3418 # create one file of size 10MB and write
3419 file_size=self.DEFAULT_FILE_SIZE*10
3420 number_of_files=1
3421 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3422 number_of_files,
3423 file_size))
3424 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3)
3425 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3426
3427 # create a file of size 5MB and try write more
3428 file_size=file_size // 2
3429 number_of_files=1
3430 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3431 number_of_files,
3432 file_size))
3433 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4)
3434 try:
3435 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3436 except CommandFailedError:
3437 # Not able to write. So expand the subvolume more and try writing the 5MB file again
3438 nsize = osize*2
3439 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3440 try:
3441 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3442 except CommandFailedError:
3443 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3444 "to succeed".format(subvolname, number_of_files, file_size))
3445 else:
3446 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3447 "to fail".format(subvolname, number_of_files, file_size))
3448
3449 # remove subvolume
3450 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3451
3452 # verify trash dir is clean
3453 self._wait_for_trash_empty()
3454
3455 def test_subvolume_resize_infinite_size(self):
3456 """
3457 That a subvolume can be resized to an infinite size by unsetting its quota.
3458 """
3459
3460 # create subvolume
3461 subvolname = self._generate_random_subvolume_name()
3462 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
3463 str(self.DEFAULT_FILE_SIZE*1024*1024))
3464
3465 # make sure it exists
3466 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3467 self.assertNotEqual(subvolpath, None)
3468
3469 # resize inf
3470 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
3471
3472 # verify that the quota is None
3473 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
3474 self.assertEqual(size, None)
3475
3476 # remove subvolume
3477 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3478
3479 # verify trash dir is clean
3480 self._wait_for_trash_empty()
3481
3482 def test_subvolume_resize_infinite_size_future_writes(self):
3483 """
3484 That a subvolume can be resized to an infinite size and the future writes succeed.
3485 """
3486
3487 # create subvolume
3488 subvolname = self._generate_random_subvolume_name()
3489 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
3490 str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")
3491
3492 # make sure it exists
3493 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3494 self.assertNotEqual(subvolpath, None)
3495
3496 # resize inf
3497 self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf")
3498
3499 # verify that the quota is None
3500 size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")
3501 self.assertEqual(size, None)
3502
3503 # create one file of 10MB and try to write
3504 file_size=self.DEFAULT_FILE_SIZE*10
3505 number_of_files=1
3506 log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
3507 number_of_files,
3508 file_size))
3509 filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5)
3510
3511 try:
3512 self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)
3513 except CommandFailedError:
3514 self.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3515 "to succeed".format(subvolname, number_of_files, file_size))
3516
3517 # remove subvolume
3518 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3519
3520 # verify trash dir is clean
3521 self._wait_for_trash_empty()
3522
3523 def test_subvolume_rm_force(self):
3524 # test removing non-existing subvolume with --force
3525 subvolume = self._generate_random_subvolume_name()
3526 try:
3527 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
3528 except CommandFailedError:
3529 self.fail("expected the 'fs subvolume rm --force' command to succeed")
3530
3531 def test_subvolume_exists_with_subvolumegroup_and_subvolume(self):
3532 """Test the presence of any subvolume by specifying the name of subvolumegroup"""
3533
3534 group = self._generate_random_group_name()
3535 subvolume1 = self._generate_random_subvolume_name()
3536 # create subvolumegroup
3537 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3538 # create subvolume in group
3539 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group)
3540 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3541 self.assertEqual(ret.strip('\n'), "subvolume exists")
3542 # delete subvolume in group
3543 self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
3544 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3545 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3546 # delete subvolumegroup
3547 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3548
3549 def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self):
3550 """Test the presence of any subvolume specifying the name
3551 of subvolumegroup and no subvolumes"""
3552
3553 group = self._generate_random_group_name()
3554 # create subvolumegroup
3555 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3556 ret = self._fs_cmd("subvolume", "exist", self.volname, "--group_name", group)
3557 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3558 # delete subvolumegroup
3559 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3560
3561 def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self):
3562 """Test the presence of any subvolume without specifying the name
3563 of subvolumegroup"""
3564
3565 subvolume1 = self._generate_random_subvolume_name()
3566 # create subvolume
3567 self._fs_cmd("subvolume", "create", self.volname, subvolume1)
3568 ret = self._fs_cmd("subvolume", "exist", self.volname)
3569 self.assertEqual(ret.strip('\n'), "subvolume exists")
3570 # delete subvolume
3571 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
3572 ret = self._fs_cmd("subvolume", "exist", self.volname)
3573 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3574
3575 def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self):
3576 """Test the presence of any subvolume without any subvolumegroup
3577 and without any subvolume"""
3578
3579 ret = self._fs_cmd("subvolume", "exist", self.volname)
3580 self.assertEqual(ret.strip('\n'), "no subvolume exists")
3581
3582 def test_subvolume_shrink(self):
3583 """
3584 That a subvolume can be shrunk in size and that its quota matches the expected size.
3585 """
3586
3587 # create subvolume
3588 subvolname = self._generate_random_subvolume_name()
3589 osize = self.DEFAULT_FILE_SIZE*1024*1024
3590 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
3591
3592 # make sure it exists
3593 subvolpath = self._get_subvolume_path(self.volname, subvolname)
3594 self.assertNotEqual(subvolpath, None)
3595
3596 # shrink the subvolume
3597 nsize = osize // 2
3598 self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
3599
3600 # verify the quota
3601 size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
3602 self.assertEqual(size, nsize)
3603
3604 # remove subvolume
3605 self._fs_cmd("subvolume", "rm", self.volname, subvolname)
3606
3607 # verify trash dir is clean
3608 self._wait_for_trash_empty()
3609
3610 def test_subvolume_retain_snapshot_rm_idempotency(self):
3611 """
3612 Ensure that removing a subvolume that has already been removed with the retain-snapshots option passes.
3613 After a removal with retained snapshots, the subvolume exists until the trash directory (which resides
3614 inside the subvolume) is cleaned up. A subvolume removal issued while the trash directory is not yet
3615 empty should pass and should not error out with EAGAIN.
3616 """
3617 subvolume = self._generate_random_subvolume_name()
3618 snapshot = self._generate_random_snapshot_name()
3619
3620 # create subvolume
3621 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
3622
3623 # do some IO
3624 self._do_subvolume_io(subvolume, number_of_files=256)
3625
3626 # snapshot subvolume
3627 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
3628
3629 # remove with snapshot retention
3630 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
3631
3632 # remove snapshots (removes retained volume)
3633 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
3634
3635 # remove subvolume (check idempotency)
3636 try:
3637 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
3638 except CommandFailedError as ce:
3639 if ce.exitstatus != errno.ENOENT:
3640 self.fail(f"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
3641
3642 # verify trash dir is clean
3643 self._wait_for_trash_empty()
3644
3645
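# The user-metadata tests below drive the per-subvolume key/value store
# via the CLI; the sub-commands exercised are:
#
#     fs subvolume metadata set <vol> <subvol> <key> <value> [--group_name <g>]
#     fs subvolume metadata get <vol> <subvol> <key> [--group_name <g>]
#     fs subvolume metadata ls <vol> <subvol> [--group_name <g>]
#     fs subvolume metadata rm <vol> <subvol> <key> [--group_name <g>] [--force]
#
# Nonexistent keys (or a missing metadata section) yield ENOENT, except
# for rm with --force, which succeeds silently.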
3646 def test_subvolume_user_metadata_set(self):
3647 subvolname = self._generate_random_subvolume_name()
3648 group = self._generate_random_group_name()
3649
3650 # create group.
3651 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3652
3653 # create subvolume in group.
3654 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3655
3656 # set metadata for subvolume.
3657 key = "key"
3658 value = "value"
3659 try:
3660 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3661 except CommandFailedError:
3662 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3663
3664 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3665 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3666
3667 # verify trash dir is clean.
3668 self._wait_for_trash_empty()
3669
3670 def test_subvolume_user_metadata_set_idempotence(self):
3671 subvolname = self._generate_random_subvolume_name()
3672 group = self._generate_random_group_name()
3673
3674 # create group.
3675 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3676
3677 # create subvolume in group.
3678 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3679
3680 # set metadata for subvolume.
3681 key = "key"
3682 value = "value"
3683 try:
3684 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3685 except CommandFailedError:
3686 self.fail("expected the 'fs subvolume metadata set' command to succeed")
3687
3688 # set same metadata again for subvolume.
3689 try:
3690 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3691 except CommandFailedError:
3692 self.fail("expected the 'fs subvolume metadata set' command to succeed because it is an idempotent operation")
3693
3694 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3695 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3696
3697 # verify trash dir is clean.
3698 self._wait_for_trash_empty()
3699
3700 def test_subvolume_user_metadata_get(self):
3701 subvolname = self._generate_random_subvolume_name()
3702 group = self._generate_random_group_name()
3703
3704 # create group.
3705 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3706
3707 # create subvolume in group.
3708 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3709
3710 # set metadata for subvolume.
3711 key = "key"
3712 value = "value"
3713 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3714
3715 # get value for specified key.
3716 try:
3717 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3718 except CommandFailedError:
3719 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3720
3721 # remove '\n' from returned value.
3722 ret = ret.strip('\n')
3723
3724 # match received value with expected value.
3725 self.assertEqual(value, ret)
3726
3727 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3728 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3729
3730 # verify trash dir is clean.
3731 self._wait_for_trash_empty()
3732
3733 def test_subvolume_user_metadata_get_for_nonexisting_key(self):
3734 subvolname = self._generate_random_subvolume_name()
3735 group = self._generate_random_group_name()
3736
3737 # create group.
3738 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3739
3740 # create subvolume in group.
3741 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3742
3743 # set metadata for subvolume.
3744 key = "key"
3745 value = "value"
3746 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3747
3748 # try to get value for nonexisting key
3749 # Expecting ENOENT exit status because key does not exist
3750 try:
3751 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_nonexist", "--group_name", group)
3752 except CommandFailedError as e:
3753 self.assertEqual(e.exitstatus, errno.ENOENT)
3754 else:
3755 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
3756
3757 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3758 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3759
3760 # verify trash dir is clean.
3761 self._wait_for_trash_empty()
3762
3763 def test_subvolume_user_metadata_get_for_nonexisting_section(self):
3764 subvolname = self._generate_random_subvolume_name()
3765 group = self._generate_random_group_name()
3766
3767 # create group.
3768 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3769
3770 # create subvolume in group.
3771 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3772
3773 # try to get value for nonexisting key (as section does not exist)
3774 # Expecting ENOENT exit status because key does not exist
3775 try:
3776 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key", "--group_name", group)
3777 except CommandFailedError as e:
3778 self.assertEqual(e.exitstatus, errno.ENOENT)
3779 else:
3780 self.fail("Expected ENOENT because section does not exist")
3781
3782 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3783 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3784
3785 # verify trash dir is clean.
3786 self._wait_for_trash_empty()
3787
3788 def test_subvolume_user_metadata_update(self):
3789 subvolname = self._generate_random_subvolume_name()
3790 group = self._generate_random_group_name()
3791
3792 # create group.
3793 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3794
3795 # create subvolume in group.
3796 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3797
3798 # set metadata for subvolume.
3799 key = "key"
3800 value = "value"
3801 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3802
3803 # update metadata against key.
3804 new_value = "new_value"
3805 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, new_value, "--group_name", group)
3806
3807 # get metadata for specified key of subvolume.
3808 try:
3809 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3810 except CommandFailedError:
3811 self.fail("expected the 'fs subvolume metadata get' command to succeed")
3812
3813 # remove '\n' from returned value.
3814 ret = ret.strip('\n')
3815
3816 # match received value with expected value.
3817 self.assertEqual(new_value, ret)
3818
3819 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3820 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3821
3822 # verify trash dir is clean.
3823 self._wait_for_trash_empty()
3824
3825 def test_subvolume_user_metadata_list(self):
3826 subvolname = self._generate_random_subvolume_name()
3827 group = self._generate_random_group_name()
3828
3829 # create group.
3830 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3831
3832 # create subvolume in group.
3833 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3834
3835 # set metadata for subvolume.
3836 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
3837
3838 for k, v in input_metadata_dict.items():
3839 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)
3840
3841 # list metadata
3842 try:
3843 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
3844 except CommandFailedError:
3845 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
3846
3847 ret_dict = json.loads(ret)
3848
3849 # compare output with expected output
3850 self.assertDictEqual(input_metadata_dict, ret_dict)
3851
3852 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3853 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3854
3855 # verify trash dir is clean.
3856 self._wait_for_trash_empty()
3857
3858 def test_subvolume_user_metadata_list_if_no_metadata_set(self):
3859 subvolname = self._generate_random_subvolume_name()
3860 group = self._generate_random_group_name()
3861
3862 # create group.
3863 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3864
3865 # create subvolume in group.
3866 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3867
3868 # list metadata
3869 try:
3870 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
3871 except CommandFailedError:
3872 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
3873
3874 # remove '\n' from returned value.
3875 ret = ret.strip('\n')
3876
3877 # compare output with expected output
3878 # expecting empty json/dictionary
3879 self.assertEqual(ret, "{}")
3880
3881 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3882 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3883
3884 # verify trash dir is clean.
3885 self._wait_for_trash_empty()
3886
3887 def test_subvolume_user_metadata_remove(self):
3888 subvolname = self._generate_random_subvolume_name()
3889 group = self._generate_random_group_name()
3890
3891 # create group.
3892 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3893
3894 # create subvolume in group.
3895 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3896
3897 # set metadata for subvolume.
3898 key = "key"
3899 value = "value"
3900 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3901
3902 # remove metadata against specified key.
3903 try:
3904 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
3905 except CommandFailedError:
3906 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3907
3908 # confirm key is removed by again fetching metadata
3909 try:
3910 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
3911 except CommandFailedError as e:
3912 self.assertEqual(e.exitstatus, errno.ENOENT)
3913 else:
3914 self.fail("Expected ENOENT because key does not exist")
3915
3916 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3917 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3918
3919 # verify trash dir is clean.
3920 self._wait_for_trash_empty()
3921
3922 def test_subvolume_user_metadata_remove_for_nonexisting_key(self):
3923 subvolname = self._generate_random_subvolume_name()
3924 group = self._generate_random_group_name()
3925
3926 # create group.
3927 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3928
3929 # create subvolume in group.
3930 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3931
3932 # set metadata for subvolume.
3933 key = "key"
3934 value = "value"
3935 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3936
3937 # try to remove value for nonexisting key
3938 # Expecting ENOENT exit status because key does not exist
3939 try:
3940 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_nonexist", "--group_name", group)
3941 except CommandFailedError as e:
3942 self.assertEqual(e.exitstatus, errno.ENOENT)
3943 else:
3944 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
3945
3946 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3947 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3948
3949 # verify trash dir is clean.
3950 self._wait_for_trash_empty()
3951
3952 def test_subvolume_user_metadata_remove_for_nonexisting_section(self):
3953 subvolname = self._generate_random_subvolume_name()
3954 group = self._generate_random_group_name()
3955
3956 # create group.
3957 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3958
3959 # create subvolume in group.
3960 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3961
3962 # try to remove value for nonexisting key (as section does not exist)
3963 # Expecting ENOENT exit status because key does not exist
3964 try:
3965 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key", "--group_name", group)
3966 except CommandFailedError as e:
3967 self.assertEqual(e.exitstatus, errno.ENOENT)
3968 else:
3969 self.fail("Expected ENOENT because section does not exist")
3970
3971 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
3972 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
3973
3974 # verify trash dir is clean.
3975 self._wait_for_trash_empty()
3976
3977 def test_subvolume_user_metadata_remove_force(self):
3978 subvolname = self._generate_random_subvolume_name()
3979 group = self._generate_random_group_name()
3980
3981 # create group.
3982 self._fs_cmd("subvolumegroup", "create", self.volname, group)
3983
3984 # create subvolume in group.
3985 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
3986
3987 # set metadata for subvolume.
3988 key = "key"
3989 value = "value"
3990 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
3991
3992 # remove metadata against specified key with --force option.
3993 try:
3994 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
3995 except CommandFailedError:
3996 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
3997
3998 # confirm key is removed by again fetching metadata
3999 try:
4000 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
4001 except CommandFailedError as e:
4002 self.assertEqual(e.exitstatus, errno.ENOENT)
4003 else:
4004 self.fail("Expected ENOENT because key does not exist")
4005
4006 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4007 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4008
4009 # verify trash dir is clean.
4010 self._wait_for_trash_empty()
4011
4012 def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self):
4013 subvolname = self._generate_random_subvolume_name()
4014 group = self._generate_random_group_name()
4015
4016 # create group.
4017 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4018
4019 # create subvolume in group.
4020 self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)
4021
4022 # set metadata for subvolume.
4023 key = "key"
4024 value = "value"
4025 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
4026
4027 # remove metadata against specified key.
4028 try:
4029 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group)
4030 except CommandFailedError:
4031 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
4032
4033 # confirm key is removed by again fetching metadata
4034 try:
4035 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
4036 except CommandFailedError as e:
4037 self.assertEqual(e.exitstatus, errno.ENOENT)
4038 else:
4039 self.fail("Expected ENOENT because key does not exist")
4040
4041 # again remove metadata against already removed key with --force option.
4042 try:
4043 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, key, "--group_name", group, "--force")
4044 except CommandFailedError:
4045 self.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")
4046
4047 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4048 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4049
4050 # verify trash dir is clean.
4051 self._wait_for_trash_empty()
4052
4053 def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self):
4054 subvolname = self._generate_random_subvolume_name()
4055 group = self._generate_random_group_name()
4056
4057 # emulate an old-fashioned subvolume in a custom group
4058 createpath = os.path.join(".", "volumes", group, subvolname)
4059 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
4060
4061 # set metadata for subvolume.
4062 key = "key"
4063 value = "value"
4064 try:
4065 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, key, value, "--group_name", group)
4066 except CommandFailedError:
4067 self.fail("expected the 'fs subvolume metadata set' command to succeed")
4068
4069 # get value for specified key.
4070 try:
4071 ret = self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, key, "--group_name", group)
4072 except CommandFailedError:
4073 self.fail("expected the 'fs subvolume metadata get' command to succeed")
4074
4075 # remove '\n' from returned value.
4076 ret = ret.strip('\n')
4077
4078 # match received value with expected value.
4079 self.assertEqual(value, ret)
4080
4081 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4082 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4083
4084 # verify trash dir is clean.
4085 self._wait_for_trash_empty()
4086
4087 def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self):
4088 subvolname = self._generate_random_subvolume_name()
4089 group = self._generate_random_group_name()
4090
4091 # emulate an old-fashioned subvolume in a custom group
4092 createpath = os.path.join(".", "volumes", group, subvolname)
4093 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
4094
4095 # set metadata for subvolume.
4096 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
4097
4098 for k, v in input_metadata_dict.items():
4099 self._fs_cmd("subvolume", "metadata", "set", self.volname, subvolname, k, v, "--group_name", group)
4100
4101 # list metadata
4102 try:
4103 ret = self._fs_cmd("subvolume", "metadata", "ls", self.volname, subvolname, "--group_name", group)
4104 except CommandFailedError:
4105 self.fail("expected the 'fs subvolume metadata ls' command to succeed")
4106
4107 ret_dict = json.loads(ret)
4108
4109 # compare output with expected output
4110 self.assertDictEqual(input_metadata_dict, ret_dict)
4111
4112 # remove metadata against specified key.
4113 try:
4114 self._fs_cmd("subvolume", "metadata", "rm", self.volname, subvolname, "key_1", "--group_name", group)
4115 except CommandFailedError:
4116 self.fail("expected the 'fs subvolume metadata rm' command to succeed")
4117
4118 # confirm key is removed by again fetching metadata
4119 try:
4120 self._fs_cmd("subvolume", "metadata", "get", self.volname, subvolname, "key_1", "--group_name", group)
4121 except CommandFailedError as e:
4122 self.assertEqual(e.exitstatus, errno.ENOENT)
4123 else:
4124 self.fail("Expected ENOENT because key_1 does not exist")
4125
4126 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
4127 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4128
4129 # verify trash dir is clean.
4130 self._wait_for_trash_empty()
4131
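# A rough sketch of the on-disk layouts the legacy tests above emulate
# (illustrative only; the authoritative logic lives in the mgr/volumes plugin
# and in _get_subvolume_snapshot_path earlier in this file):
#
#   v1 (legacy, emulated with a bare mkdir):
#       volumes/<group>/<subvolume>                  <- data lives here
#       volumes/<group>/<subvolume>/.snap/<snapshot>
#
#   v2:
#       volumes/<group>/<subvolume>/<uuid>           <- per-incarnation dir
#       volumes/<group>/<subvolume>/.snap/<snapshot>/<uuid>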
4132 class TestSubvolumeGroupSnapshots(TestVolumesHelper):
4133 """Tests for FS subvolume group snapshot operations."""
4134 @unittest.skip("skipping subvolumegroup snapshot tests")
4135 def test_nonexistent_subvolume_group_snapshot_rm(self):
4136 subvolume = self._generate_random_subvolume_name()
4137 group = self._generate_random_group_name()
4138 snapshot = self._generate_random_snapshot_name()
4139
4140 # create group
4141 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4142
4143 # create subvolume in group
4144 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4145
4146 # snapshot group
4147 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4148
4149 # remove snapshot
4150 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4151
4152 # remove snapshot
4153 try:
4154 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4155 except CommandFailedError as ce:
4156 if ce.exitstatus != errno.ENOENT:
4157 raise
4158 else:
4159 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")
4160
4161 # remove subvolume
4162 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4163
4164 # verify trash dir is clean
4165 self._wait_for_trash_empty()
4166
4167 # remove group
4168 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4169
4170 @unittest.skip("skipping subvolumegroup snapshot tests")
4171 def test_subvolume_group_snapshot_create_and_rm(self):
4172 subvolume = self._generate_random_subvolume_name()
4173 group = self._generate_random_group_name()
4174 snapshot = self._generate_random_snapshot_name()
4175
4176 # create group
4177 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4178
4179 # create subvolume in group
4180 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4181
4182 # snapshot group
4183 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4184
4185 # remove snapshot
4186 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4187
4188 # remove subvolume
4189 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4190
4191 # verify trash dir is clean
4192 self._wait_for_trash_empty()
4193
4194 # remove group
4195 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4196
4197 @unittest.skip("skipping subvolumegroup snapshot tests")
4198 def test_subvolume_group_snapshot_idempotence(self):
4199 subvolume = self._generate_random_subvolume_name()
4200 group = self._generate_random_group_name()
4201 snapshot = self._generate_random_snapshot_name()
4202
4203 # create group
4204 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4205
4206 # create subvolume in group
4207 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4208
4209 # snapshot group
4210 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4211
4212 # try creating snapshot w/ same snapshot name -- should be idempotent
4213 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4214
4215 # remove snapshot
4216 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
4217
4218 # remove subvolume
4219 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4220
4221 # verify trash dir is clean
4222 self._wait_for_trash_empty()
4223
4224 # remove group
4225 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4226
4227 @unittest.skip("skipping subvolumegroup snapshot tests")
4228 def test_subvolume_group_snapshot_ls(self):
4229 # tests the 'fs subvolumegroup snapshot ls' command
4230
4231 snapshots = []
4232
4233 # create group
4234 group = self._generate_random_group_name()
4235 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4236
4237 # create subvolumegroup snapshots
4238 snapshots = self._generate_random_snapshot_name(3)
4239 for snapshot in snapshots:
4240 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4241
4242 subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
4243 if len(subvolgrpsnapshotls) == 0:
4244 raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
4245 else:
4246 snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
4247 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
4248 raise RuntimeError("Error creating or listing subvolume group snapshots")
4249
4250 @unittest.skip("skipping subvolumegroup snapshot tests")
4251 def test_subvolume_group_snapshot_rm_force(self):
4252 # test removing non-existing subvolume group snapshot with --force
4253 group = self._generate_random_group_name()
4254 snapshot = self._generate_random_snapshot_name()
4255 # remove snapshot
4256 try:
4257 self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
4258 except CommandFailedError:
4259 raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")
4260
4261 def test_subvolume_group_snapshot_unsupported_status(self):
4262 group = self._generate_random_group_name()
4263 snapshot = self._generate_random_snapshot_name()
4264
4265 # create group
4266 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4267
4268 # snapshot group
4269 try:
4270 self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
4271 except CommandFailedError as ce:
4272 self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
4273 else:
4274 self.fail("expected subvolumegroup snapshot create command to fail")
4275
4276 # remove group
4277 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4278
4279
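# Illustrative sketch (hypothetical helper, not used by the suite): since the
# volumes plugin currently rejects subvolumegroup snapshots with ENOSYS (as
# asserted in test_subvolume_group_snapshot_unsupported_status above), a probe
# for the feature could treat ENOSYS as "unsupported" rather than a failure.
def _group_snapshots_supported(fs_cmd, volname, group, snapname):
    try:
        fs_cmd("subvolumegroup", "snapshot", "create", volname, group, snapname)
    except CommandFailedError as ce:
        if ce.exitstatus == errno.ENOSYS:
            return False
        raise
    return True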
4280 class TestSubvolumeSnapshots(TestVolumesHelper):
4281 """Tests for FS subvolume snapshot operations."""
4282 def test_nonexistent_subvolume_snapshot_rm(self):
4283 subvolume = self._generate_random_subvolume_name()
4284 snapshot = self._generate_random_snapshot_name()
4285
4286 # create subvolume
4287 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4288
4289 # snapshot subvolume
4290 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4291
4292 # remove snapshot
4293 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4294
4295 # remove snapshot again
4296 try:
4297 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4298 except CommandFailedError as ce:
4299 if ce.exitstatus != errno.ENOENT:
4300 raise
4301 else:
4302 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
4303
4304 # remove subvolume
4305 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4306
4307 # verify trash dir is clean
4308 self._wait_for_trash_empty()
4309
4310 def test_subvolume_snapshot_create_and_rm(self):
4311 subvolume = self._generate_random_subvolume_name()
4312 snapshot = self._generate_random_snapshot_name()
4313
4314 # create subvolume
4315 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4316
4317 # snapshot subvolume
4318 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4319
4320 # remove snapshot
4321 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4322
4323 # remove subvolume
4324 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4325
4326 # verify trash dir is clean
4327 self._wait_for_trash_empty()
4328
4329 def test_subvolume_snapshot_create_idempotence(self):
4330 subvolume = self._generate_random_subvolume_name()
4331 snapshot = self._generate_random_snapshot_name()
4332
4333 # create subvolume
4334 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4335
4336 # snapshot subvolume
4337 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4338
4339 # try creating w/ same subvolume snapshot name -- should be idempotent
4340 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4341
4342 # remove snapshot
4343 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4344
4345 # remove subvolume
4346 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4347
4348 # verify trash dir is clean
4349 self._wait_for_trash_empty()
4350
4351 def test_subvolume_snapshot_info(self):
4352
4353 """
4354 tests the 'fs subvolume snapshot info' command
4355 """
4356
4357 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4358
4359 subvolume = self._generate_random_subvolume_name()
4360 snapshot, snap_missing = self._generate_random_snapshot_name(2)
4361
4362 # create subvolume
4363 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
4364
4365 # do some IO
4366 self._do_subvolume_io(subvolume, number_of_files=1)
4367
4368 # snapshot subvolume
4369 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4370
4371 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
4372 for md in snap_md:
4373 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4374 self.assertEqual(snap_info["has_pending_clones"], "no")
4375
4376 # snapshot info for non-existent snapshot
4377 try:
4378 self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
4379 except CommandFailedError as ce:
4380 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
4381 else:
4382 self.fail("expected snapshot info of non-existent snapshot to fail")
4383
4384 # remove snapshot
4385 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4386
4387 # remove subvolume
4388 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4389
4390 # verify trash dir is clean
4391 self._wait_for_trash_empty()
4392
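    # For reference, a parsed 'snapshot info' reply checked above looks roughly
    # like this (illustrative values; only the keys in snap_md and the
    # 'has_pending_clones' value are asserted on):
    #
    #   {
    #       "created_at": "2023-01-01 00:00:00.000000",
    #       "data_pool": "cephfs.a.data",
    #       "has_pending_clones": "no"
    #   }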
4393 def test_subvolume_snapshot_in_group(self):
4394 subvolume = self._generate_random_subvolume_name()
4395 group = self._generate_random_group_name()
4396 snapshot = self._generate_random_snapshot_name()
4397
4398 # create group
4399 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4400
4401 # create subvolume in group
4402 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4403
4404 # snapshot subvolume in group
4405 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
4406
4407 # remove snapshot
4408 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
4409
4410 # remove subvolume
4411 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4412
4413 # verify trash dir is clean
4414 self._wait_for_trash_empty()
4415
4416 # remove group
4417 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4418
4419 def test_subvolume_snapshot_ls(self):
4420 # tests the 'fs subvolume snapshot ls' command
4421
4422 snapshots = []
4423
4424 # create subvolume
4425 subvolume = self._generate_random_subvolume_name()
4426 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4427
4428 # create subvolume snapshots
4429 snapshots = self._generate_random_snapshot_name(3)
4430 for snapshot in snapshots:
4431 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4432
4433 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
4434 if len(subvolsnapshotls) == 0:
4435 self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
4436 else:
4437 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
4438 if collections.Counter(snapshotnames) != collections.Counter(snapshots):
4439 self.fail("Error creating or listing subvolume snapshots")
4440
4441 # remove snapshot
4442 for snapshot in snapshots:
4443 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4444
4445 # remove subvolume
4446 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4447
4448 # verify trash dir is clean
4449 self._wait_for_trash_empty()
4450
4451 def test_subvolume_inherited_snapshot_ls(self):
4452 # tests that the 'fs subvolume snapshot ls' command does not list
4453 # inherited snapshots created as part of a snapshot taken at an
4454 # ancestral level
4455
4456 snapshots = []
4457 subvolume = self._generate_random_subvolume_name()
4458 group = self._generate_random_group_name()
4459 snap_count = 3
4460
4461 # create group
4462 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4463
4464 # create subvolume in group
4465 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4466
4467 # create subvolume snapshots
4468 snapshots = self._generate_random_snapshot_name(snap_count)
4469 for snapshot in snapshots:
4470 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
4471
4472 # Create snapshot at ancestral level
4473 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
4474 ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
4475 self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
4476
4477 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
4478 self.assertEqual(len(subvolsnapshotls), snap_count)
4479
4480 # remove ancestral snapshots
4481 self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
4482
4483 # remove snapshot
4484 for snapshot in snapshots:
4485 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
4486
4487 # remove subvolume
4488 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4489
4490 # verify trash dir is clean
4491 self._wait_for_trash_empty()
4492
4493 # remove group
4494 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4495
4496 def test_subvolume_inherited_snapshot_info(self):
4497 """
4498 tests that the 'fs subvolume snapshot info' command fails for
4499 inherited snapshots created as part of a snapshot taken at an
4500 ancestral level
4501 """
4502
4503 subvolume = self._generate_random_subvolume_name()
4504 group = self._generate_random_group_name()
4505
4506 # create group
4507 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4508
4509 # create subvolume in group
4510 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4511
4512 # Create snapshot at ancestral level
4513 ancestral_snap_name = "ancestral_snap_1"
4514 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
4515 self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
4516
4517 # Validate existence of inherited snapshot
4518 group_path = os.path.join(".", "volumes", group)
4519 inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
4520 inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
4521 inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
4522 self.mount_a.run_shell(['ls', inherited_snappath])
4523
4524 # snapshot info on inherited snapshot
4525 try:
4526 self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
4527 except CommandFailedError as ce:
4528 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
4529 else:
4530 self.fail("expected snapshot info of inherited snapshot to fail")
4531
4532 # remove ancestral snapshots
4533 self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
4534
4535 # remove subvolume
4536 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
4537
4538 # verify trash dir is clean
4539 self._wait_for_trash_empty()
4540
4541 # remove group
4542 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4543
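    # Illustrative sketch (hypothetical helper, not used by the suite): the
    # inherited-snapshot naming rule validated in the tests around here is
    # "_<ancestral snapshot name>_<inode number of the group directory>".
    def _inherited_snap_name(self, group, ancestral_snap):
        group_path = os.path.join(".", "volumes", group)
        ino = int(self.mount_a.run_shell(
            ['stat', '-c', '%i', group_path]).stdout.getvalue().strip())
        return "_{0}_{1}".format(ancestral_snap, ino)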
4544 def test_subvolume_inherited_snapshot_rm(self):
4545 """
4546 tests that the 'fs subvolume snapshot rm' command fails for
4547 inherited snapshots created as part of a snapshot taken at an
4548 ancestral level
4549 """
4550
4551 subvolume = self._generate_random_subvolume_name()
4552 group = self._generate_random_group_name()
4553
4554 # create group
4555 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4556
4557 # create subvolume in group
4558 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4559
4560 # Create snapshot at ancestral level
4561 ancestral_snap_name = "ancestral_snap_1"
4562 ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
4563 self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
4564
4565 # Validate existence of inherited snap
4566 group_path = os.path.join(".", "volumes", group)
4567 inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
4568 inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
4569 inherited_snappath = os.path.join(".", "volumes", group, subvolume, ".snap", inherited_snap)
4570 self.mount_a.run_shell(['ls', inherited_snappath])
4571
4572 # inherited snapshot should not be deletable
4573 try:
4574 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
4575 except CommandFailedError as ce:
4576 self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
4577 else:
4578 self.fail("expected removing inherited snapshot to fail")
4579
4580 # remove ancestral snapshots
4581 self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
4582
4583 # remove subvolume
4584 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4585
4586 # verify trash dir is clean
4587 self._wait_for_trash_empty()
4588
4589 # remove group
4590 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4591
4592 def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
4593 """
4594 tests that creating a subvolume snapshot with the same name as an
4595 existing snapshot of its subvolumegroup fails. The creation is
4596 expected to fail with EINVAL.
4597 """
4598
4599 subvolume = self._generate_random_subvolume_name()
4600 group = self._generate_random_group_name()
4601 group_snapshot = self._generate_random_snapshot_name()
4602
4603 # create group
4604 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4605
4606 # create subvolume in group
4607 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
4608
4609 # Create subvolumegroup snapshot
4610 group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
4611 self.mount_a.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path], omit_sudo=False)
4612
4613 # Validate existence of subvolumegroup snapshot
4614 self.mount_a.run_shell(['ls', group_snapshot_path])
4615
4616 # Creation of subvolume snapshot with its subvolumegroup snapshot name should fail
4617 try:
4618 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
4619 except CommandFailedError as ce:
4620 self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
4621 else:
4622 self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
4623
4624 # remove subvolumegroup snapshot
4625 self.mount_a.run_shell(['sudo', 'rmdir', group_snapshot_path], omit_sudo=False)
4626
4627 # remove subvolume
4628 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
4629
4630 # verify trash dir is clean
4631 self._wait_for_trash_empty()
4632
4633 # remove group
4634 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4635
4636 def test_subvolume_retain_snapshot_invalid_recreate(self):
4637 """
4638 ensure that recreating a retained subvolume does not leave any stale incarnations in the subvolume or the trash
4639 """
4640 subvolume = self._generate_random_subvolume_name()
4641 snapshot = self._generate_random_snapshot_name()
4642
4643 # create subvolume
4644 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4645
4646 # snapshot subvolume
4647 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4648
4649 # remove with snapshot retention
4650 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4651
4652 # recreate subvolume with an invalid pool
4653 data_pool = "invalid_pool"
4654 try:
4655 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
4656 except CommandFailedError as ce:
4657 self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname")
4658 else:
4659 self.fail("expected recreate of subvolume with invalid poolname to fail")
4660
4661 # fetch info
4662 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4663 self.assertEqual(subvol_info["state"], "snapshot-retained",
4664 msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
4665
4666 # getpath
4667 try:
4668 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
4669 except CommandFailedError as ce:
4670 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
4671 else:
4672 self.fail("expected getpath of subvolume with retained snapshots to fail")
4673
4674 # remove snapshot (should remove volume)
4675 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4676
4677 # verify trash dir is clean
4678 self._wait_for_trash_empty()
4679
4680 def test_subvolume_retain_snapshot_recreate_subvolume(self):
4681 """
4682 ensure a retained subvolume can be recreated and further snapshotted
4683 """
4684 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4685
4686 subvolume = self._generate_random_subvolume_name()
4687 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
4688
4689 # create subvolume
4690 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4691
4692 # snapshot subvolume
4693 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
4694
4695 # remove with snapshot retention
4696 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4697
4698 # fetch info
4699 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4700 self.assertEqual(subvol_info["state"], "snapshot-retained",
4701 msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
4702
4703 # recreate retained subvolume
4704 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4705
4706 # fetch info
4707 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4708 self.assertEqual(subvol_info["state"], "complete",
4709 msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
4710
4711 # snapshot info (older snapshot)
4712 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
4713 for md in snap_md:
4714 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4715 self.assertEqual(snap_info["has_pending_clones"], "no")
4716
4717 # snap-create (new snapshot)
4718 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
4719
4720 # remove with retain snapshots
4721 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4722
4723 # list snapshots
4724 subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
4725 self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
4726 " created subvolume snapshots")
4727 snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
4728 for snap in [snapshot1, snapshot2]:
4729 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
4730
4731 # remove snapshots (should remove volume)
4732 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
4733 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
4734
4735 # verify list subvolumes returns an empty list
4736 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4737 self.assertEqual(len(subvolumels), 0)
4738
4739 # verify trash dir is clean
4740 self._wait_for_trash_empty()
4741
4742 def test_subvolume_retain_snapshot_with_snapshots(self):
4743 """
4744 ensure that a retain-snapshots delete of a subvolume with snapshots retains the subvolume,
4745 and test the allowed and disallowed operations on a retained subvolume
4746 """
4747 snap_md = ["created_at", "data_pool", "has_pending_clones"]
4748
4749 subvolume = self._generate_random_subvolume_name()
4750 snapshot = self._generate_random_snapshot_name()
4751
4752 # create subvolume
4753 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4754
4755 # snapshot subvolume
4756 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4757
4758 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4759 try:
4760 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4761 except CommandFailedError as ce:
4762 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
4763 else:
4764 self.fail("expected rm of subvolume with retained snapshots to fail")
4765
4766 # remove with snapshot retention
4767 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4768
4769 # fetch info
4770 subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
4771 self.assertEqual(subvol_info["state"], "snapshot-retained",
4772 msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
4773
4774 ## test allowed ops in retained state
4775 # ls
4776 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4777 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
4778 self.assertEqual(subvolumes[0]['name'], subvolume,
4779 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
4780
4781 # snapshot info
4782 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
4783 for md in snap_md:
4784 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
4785 self.assertEqual(snap_info["has_pending_clones"], "no")
4786
4787 # rm --force (allowed but should fail)
4788 try:
4789 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
4790 except CommandFailedError as ce:
4791 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
4792 else:
4793 self.fail("expected rm of subvolume with retained snapshots to fail")
4794
4795 # rm (allowed but should fail)
4796 try:
4797 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4798 except CommandFailedError as ce:
4799 self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
4800 else:
4801 self.fail("expected rm of subvolume with retained snapshots to fail")
4802
4803 ## test disallowed ops
4804 # getpath
4805 try:
4806 self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
4807 except CommandFailedError as ce:
4808 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
4809 else:
4810 self.fail("expected getpath of subvolume with retained snapshots to fail")
4811
4812 # resize
4813 nsize = self.DEFAULT_FILE_SIZE*1024*1024
4814 try:
4815 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
4816 except CommandFailedError as ce:
4817 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
4818 else:
4819 self.fail("expected resize of subvolume with retained snapshots to fail")
4820
4821 # snap-create
4822 try:
4823 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
4824 except CommandFailedError as ce:
4825 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
4826 else:
4827 self.fail("expected snapshot create of subvolume with retained snapshots to fail")
4828
4829 # remove snapshot (should remove volume)
4830 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4831
4832 # verify list subvolumes returns an empty list
4833 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4834 self.assertEqual(len(subvolumels), 0)
4835
4836 # verify trash dir is clean
4837 self._wait_for_trash_empty()
4838
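    # Quick reference for the behaviour pinned down above while a subvolume is
    # in the 'snapshot-retained' state:
    #   allowed:    'subvolume ls', 'snapshot info', 'snapshot rm';
    #               'subvolume rm' (with or without --force) is accepted but
    #               fails with ENOTEMPTY while snapshots remain
    #   disallowed: 'getpath', 'resize', 'snapshot create' (all ENOENT)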
4839 def test_subvolume_retain_snapshot_without_snapshots(self):
4840 """
4841 ensure that a retain-snapshots delete of a subvolume with no snapshots deletes the subvolume
4842 """
4843 subvolume = self._generate_random_subvolume_name()
4844
4845 # create subvolume
4846 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4847
4848 # remove with snapshot retention (should remove volume, no snapshots to retain)
4849 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4850
4851 # verify list subvolumes returns an empty list
4852 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
4853 self.assertEqual(len(subvolumels), 0)
4854
4855 # verify trash dir is clean
4856 self._wait_for_trash_empty()
4857
4858 def test_subvolume_retain_snapshot_trash_busy_recreate(self):
4859 """
4860 ensure retained subvolume recreate fails if its trash is not yet purged
4861 """
4862 subvolume = self._generate_random_subvolume_name()
4863 snapshot = self._generate_random_snapshot_name()
4864
4865 # create subvolume
4866 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4867
4868 # snapshot subvolume
4869 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4870
4871 # remove with snapshot retention
4872 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
4873
4874 # fake a trash entry
4875 self._update_fake_trash(subvolume)
4876
4877 # recreate subvolume
4878 try:
4879 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4880 except CommandFailedError as ce:
4881 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending")
4882 else:
4883 self.fail("expected recreate of subvolume with purge pending to fail")
4884
4885 # clear fake trash entry
4886 self._update_fake_trash(subvolume, create=False)
4887
4888 # recreate subvolume
4889 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4890
4891 # remove snapshot
4892 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4893
4894 # remove subvolume
4895 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4896
4897 # verify trash dir is clean
4898 self._wait_for_trash_empty()
4899
4900 def test_subvolume_rm_with_snapshots(self):
4901 subvolume = self._generate_random_subvolume_name()
4902 snapshot = self._generate_random_snapshot_name()
4903
4904 # create subvolume
4905 self._fs_cmd("subvolume", "create", self.volname, subvolume)
4906
4907 # snapshot subvolume
4908 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4909
4910 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4911 try:
4912 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4913 except CommandFailedError as ce:
4914 if ce.exitstatus != errno.ENOTEMPTY:
4915 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
4916 else:
4917 raise RuntimeError("expected subvolume deletion to fail")
4918
4919 # remove snapshot
4920 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4921
4922 # remove subvolume
4923 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4924
4925 # verify trash dir is clean
4926 self._wait_for_trash_empty()
4927
4928 def test_subvolume_snapshot_protect_unprotect_sanity(self):
4929 """
4930 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
4931 invoking them does not cause errors, until they are removed in a subsequent release.
4932 """
4933 subvolume = self._generate_random_subvolume_name()
4934 snapshot = self._generate_random_snapshot_name()
4935 clone = self._generate_random_clone_name()
4936
4937 # create subvolume
4938 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
4939
4940 # do some IO
4941 self._do_subvolume_io(subvolume, number_of_files=64)
4942
4943 # snapshot subvolume
4944 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
4945
4946 # now, protect snapshot
4947 self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)
4948
4949 # schedule a clone
4950 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
4951
4952 # check clone status
4953 self._wait_for_clone_to_complete(clone)
4954
4955 # now, unprotect snapshot
4956 self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)
4957
4958 # verify clone
4959 self._verify_clone(subvolume, snapshot, clone)
4960
4961 # remove snapshot
4962 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
4963
4964 # remove subvolumes
4965 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
4966 self._fs_cmd("subvolume", "rm", self.volname, clone)
4967
4968 # verify trash dir is clean
4969 self._wait_for_trash_empty()
4970
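    # Illustrative sketch (hypothetical helper, not used by the suite): the
    # snapshot -> clone -> wait -> verify sequence from the test above,
    # condensed using helpers that already exist in this file.
    def _clone_and_verify(self, subvolume, snapshot, clone):
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
        self._wait_for_clone_to_complete(clone)
        self._verify_clone(subvolume, snapshot, clone)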
4971 def test_subvolume_snapshot_rm_force(self):
4972 # test removing a non-existing subvolume snapshot with --force
4973 subvolume = self._generate_random_subvolume_name()
4974 snapshot = self._generate_random_snapshot_name()
4975
4976 # remove snapshot
4977 try:
4978 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force")
4979 except CommandFailedError:
4980 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
4981
4982 def test_subvolume_snapshot_metadata_set(self):
4983 """
4984 Set custom metadata for subvolume snapshot.
4985 """
4986 subvolname = self._generate_random_subvolume_name()
4987 group = self._generate_random_group_name()
4988 snapshot = self._generate_random_snapshot_name()
4989
4990 # create group.
4991 self._fs_cmd("subvolumegroup", "create", self.volname, group)
4992
4993 # create subvolume in group.
4994 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
4995
4996 # snapshot subvolume
4997 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
4998
4999 # set metadata for snapshot.
5000 key = "key"
5001 value = "value"
5002 try:
5003 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5004 except CommandFailedError:
5005 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5006
5007 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5008 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5009 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5010
5011 # verify trash dir is clean.
5012 self._wait_for_trash_empty()
5013
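    # Illustrative sketch (hypothetical helper, not used by the suite): the
    # snapshot metadata tests in this class all build the same command prefix,
    # so a thin wrapper could carry it. `rest` takes key/value/group/flags in
    # the same order the CLI expects, e.g. (key, value, group) for 'set'.
    def _snap_md(self, op, subvolname, snapshot, *rest):
        return self._fs_cmd("subvolume", "snapshot", "metadata", op,
                            self.volname, subvolname, snapshot, *rest)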
5014 def test_subvolume_snapshot_metadata_set_idempotence(self):
5015 """
5016 Set custom metadata for subvolume snapshot (Idempotency).
5017 """
5018 subvolname = self._generate_random_subvolume_name()
5019 group = self._generate_random_group_name()
5020 snapshot = self._generate_random_snapshot_name()
5021
5022 # create group.
5023 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5024
5025 # create subvolume in group.
5026 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5027
5028 # snapshot subvolume
5029 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5030
5031 # set metadata for snapshot.
5032 key = "key"
5033 value = "value"
5034 try:
5035 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5036 except CommandFailedError:
5037 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5038
5039 # set same metadata again for subvolume.
5040 try:
5041 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5042 except CommandFailedError:
5043 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is an idempotent operation")
5044
5045 # get value for specified key.
5046 try:
5047 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5048 except CommandFailedError:
5049 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5050
5051 # remove '\n' from returned value.
5052 ret = ret.strip('\n')
5053
5054 # match received value with expected value.
5055 self.assertEqual(value, ret)
5056
5057 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5058 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5059 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5060
5061 # verify trash dir is clean.
5062 self._wait_for_trash_empty()
5063
5064 def test_subvolume_snapshot_metadata_get(self):
5065 """
5066 Get custom metadata for a specified key in subvolume snapshot metadata.
5067 """
5068 subvolname = self._generate_random_subvolume_name()
5069 group = self._generate_random_group_name()
5070 snapshot = self._generate_random_snapshot_name()
5071
5072 # create group.
5073 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5074
5075 # create subvolume in group.
5076 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5077
5078 # snapshot subvolume
5079 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5080
5081 # set metadata for snapshot.
5082 key = "key"
5083 value = "value"
5084 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5085
5086 # get value for specified key.
5087 try:
5088 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5089 except CommandFailedError:
5090 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5091
5092 # remove '\n' from returned value.
5093 ret = ret.strip('\n')
5094
5095 # match received value with expected value.
5096 self.assertEqual(value, ret)
5097
5098 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5099 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5100 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5101
5102 # verify trash dir is clean.
5103 self._wait_for_trash_empty()
5104
5105 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self):
5106 """
5107 Get custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
5108 """
5109 subvolname = self._generate_random_subvolume_name()
5110 group = self._generate_random_group_name()
5111 snapshot = self._generate_random_snapshot_name()
5112
5113 # create group.
5114 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5115
5116 # create subvolume in group.
5117 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5118
5119 # snapshot subvolume
5120 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5121
5122 # set metadata for snapshot.
5123 key = "key"
5124 value = "value"
5125 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5126
5127 # try to get value for nonexisting key
5128 # Expecting ENOENT exit status because key does not exist
5129 try:
5130 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key_nonexist", group)
5131 except CommandFailedError as e:
5132 self.assertEqual(e.exitstatus, errno.ENOENT)
5133 else:
5134 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
5135
5136 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5137 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5138 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5139
5140 # verify trash dir is clean.
5141 self._wait_for_trash_empty()
5142
5143 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self):
5144 """
5145 Get custom metadata for a subvolume snapshot when no metadata has been added for it.
5146 """
5147 subvolname = self._generate_random_subvolume_name()
5148 group = self._generate_random_group_name()
5149 snapshot = self._generate_random_snapshot_name()
5150
5151 # create group.
5152 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5153
5154 # create subvolume in group.
5155 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5156
5157 # snapshot subvolume
5158 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5159
5160 # try to get value for nonexisting key (as section does not exist)
5161 # Expecting ENOENT exit status because key does not exist
5162 try:
5163 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, "key", group)
5164 except CommandFailedError as e:
5165 self.assertEqual(e.exitstatus, errno.ENOENT)
5166 else:
5167 self.fail("Expected ENOENT because section does not exist")
5168
5169 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5170 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5171 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5172
5173 # verify trash dir is clean.
5174 self._wait_for_trash_empty()
5175
5176 def test_subvolume_snapshot_metadata_update(self):
5177 """
5178 Update custom metadata for a specified key in subvolume snapshot metadata.
5179 """
5180 subvolname = self._generate_random_subvolume_name()
5181 group = self._generate_random_group_name()
5182 snapshot = self._generate_random_snapshot_name()
5183
5184 # create group.
5185 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5186
5187 # create subvolume in group.
5188 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5189
5190 # snapshot subvolume
5191 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5192
5193 # set metadata for snapshot.
5194 key = "key"
5195 value = "value"
5196 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5197
5198 # update metadata against key.
5199 new_value = "new_value"
5200 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, new_value, group)
5201
5202 # get metadata for specified key of snapshot.
5203 try:
5204 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5205 except CommandFailedError:
5206 self.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5207
5208 # remove '\n' from returned value.
5209 ret = ret.strip('\n')
5210
5211 # match received value with expected value.
5212 self.assertEqual(new_value, ret)
5213
5214 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5215 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5216 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5217
5218 # verify trash dir is clean.
5219 self._wait_for_trash_empty()
5220
5221 def test_subvolume_snapshot_metadata_list(self):
5222 """
5223 List custom metadata for subvolume snapshot.
5224 """
5225 subvolname = self._generate_random_subvolume_name()
5226 group = self._generate_random_group_name()
5227 snapshot = self._generate_random_snapshot_name()
5228
5229 # create group.
5230 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5231
5232 # create subvolume in group.
5233 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5234
5235 # snapshot subvolume
5236 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5237
5238 # set metadata for subvolume.
5239 input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
5240
5241 for k, v in input_metadata_dict.items():
5242 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, k, v, group)
5243
5244 # list metadata
5245 try:
5246 ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
5247 except CommandFailedError:
5248 self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5249
5250 # compare output with expected output
5251 self.assertDictEqual(input_metadata_dict, ret_dict)
5252
5253 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5254 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5255 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5256
5257 # verify trash dir is clean.
5258 self._wait_for_trash_empty()
5259
5260 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self):
5261 """
5262 List custom metadata for a subvolume snapshot when no metadata has been added for it.
5263 """
5264 subvolname = self._generate_random_subvolume_name()
5265 group = self._generate_random_group_name()
5266 snapshot = self._generate_random_snapshot_name()
5267
5268 # create group.
5269 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5270
5271 # create subvolume in group.
5272 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5273
5274 # snapshot subvolume
5275 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5276
5277 # list metadata
5278 try:
5279 ret_dict = json.loads(self._fs_cmd("subvolume", "snapshot", "metadata", "ls", self.volname, subvolname, snapshot, group))
5280 except CommandFailedError:
5281 self.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5282
5283 # compare output with expected output
5284 empty_dict = {}
5285 self.assertDictEqual(ret_dict, empty_dict)
5286
5287 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5288 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5289 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5290
5291 # verify trash dir is clean.
5292 self._wait_for_trash_empty()
5293
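    # Note: 'metadata ls' always yields a JSON object mapping the keys set so
    # far to their values (e.g. {"key_0": "value_0", ...}), and an empty object
    # when nothing has been set -- which is why both list tests above can use
    # assertDictEqual directly on the parsed output.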
5294 def test_subvolume_snapshot_metadata_remove(self):
5295 """
5296 Remove custom metadata for a specified key in subvolume snapshot metadata.
5297 """
5298 subvolname = self._generate_random_subvolume_name()
5299 group = self._generate_random_group_name()
5300 snapshot = self._generate_random_snapshot_name()
5301
5302 # create group.
5303 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5304
5305 # create subvolume in group.
5306 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5307
5308 # snapshot subvolume
5309 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5310
5311 # set metadata for snapshot.
5312 key = "key"
5313 value = "value"
5314 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5315
5316 # remove metadata against specified key.
5317 try:
5318 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
5319 except CommandFailedError:
5320 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5321
5322 # confirm the key is removed by fetching the metadata again
5323 try:
5324 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5325 except CommandFailedError as e:
5326 self.assertEqual(e.exitstatus, errno.ENOENT)
5327 else:
5328 self.fail("Expected ENOENT because key does not exist")
5329
5330 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5331 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5332 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5333
5334 # verify trash dir is clean.
5335 self._wait_for_trash_empty()
5336
5337 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self):
5338 """
5339 Remove custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
5340 """
5341 subvolname = self._generate_random_subvolume_name()
5342 group = self._generate_random_group_name()
5343 snapshot = self._generate_random_snapshot_name()
5344
5345 # create group.
5346 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5347
5348 # create subvolume in group.
5349 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5350
5351 # snapshot subvolume
5352 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5353
5354 # set metadata for snapshot.
5355 key = "key"
5356 value = "value"
5357 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5358
5359 # try to remove value for nonexisting key
5360 # Expecting ENOENT exit status because key does not exist
5361 try:
5362 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key_nonexist", group)
5363 except CommandFailedError as e:
5364 self.assertEqual(e.exitstatus, errno.ENOENT)
5365 else:
5366 self.fail("Expected ENOENT because 'key_nonexist' does not exist")
5367
5368 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5369 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5370 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5371
5372 # verify trash dir is clean.
5373 self._wait_for_trash_empty()
5374
5375 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self):
5376 """
5377 Remove custom metadata for a subvolume snapshot when no metadata has been added for it.
5378 """
5379 subvolname = self._generate_random_subvolume_name()
5380 group = self._generate_random_group_name()
5381 snapshot = self._generate_random_snapshot_name()
5382
5383 # create group.
5384 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5385
5386 # create subvolume in group.
5387 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5388
5389 # snapshot subvolume
5390 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5391
5392 # try to remove value for nonexisting key (as section does not exist)
5393 # Expecting ENOENT exit status because key does not exist
5394 try:
5395 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, "key", group)
5396 except CommandFailedError as e:
5397 self.assertEqual(e.exitstatus, errno.ENOENT)
5398 else:
5399 self.fail("Expected ENOENT because section does not exist")
5400
5401 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5402 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5403 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5404
5405 # verify trash dir is clean.
5406 self._wait_for_trash_empty()
5407
5408 def test_subvolume_snapshot_metadata_remove_force(self):
5409 """
5410 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
5411 """
5412 subvolname = self._generate_random_subvolume_name()
5413 group = self._generate_random_group_name()
5414 snapshot = self._generate_random_snapshot_name()
5415
5416 # create group.
5417 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5418
5419 # create subvolume in group.
5420 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5421
5422 # snapshot subvolume
5423 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5424
5425 # set metadata for snapshot.
5426 key = "key"
5427 value = "value"
5428 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5429
5430 # remove metadata against specified key with --force option.
5431 try:
5432 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
5433 except CommandFailedError:
5434 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5435
5436 # confirm the key is removed by fetching the metadata again
5437 try:
5438 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5439 except CommandFailedError as e:
5440 self.assertEqual(e.exitstatus, errno.ENOENT)
5441 else:
5442 self.fail("Expected ENOENT because key does not exist")
5443
5444 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5445 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5446 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5447
5448 # verify trash dir is clean.
5449 self._wait_for_trash_empty()
5450
5451 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self):
5452 """
5453 Forcefully remove custom metadata for a subvolume snapshot when the specified key does not exist in the metadata.
5454 """
5455 subvolname = self._generate_random_subvolume_name()
5456 group = self._generate_random_group_name()
5457 snapshot = self._generate_random_snapshot_name()
5458
5459 # create group.
5460 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5461
5462 # create subvolume in group.
5463 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5464
5465 # snapshot subvolume
5466 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5467
5468 # set metadata for snapshot.
5469 key = "key"
5470 value = "value"
5471 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5472
5473 # remove metadata against specified key.
5474 try:
5475 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group)
5476 except CommandFailedError:
5477 self.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5478
5479 # confirm the key is removed by fetching the metadata again
5480 try:
5481 self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5482 except CommandFailedError as e:
5483 self.assertEqual(e.exitstatus, errno.ENOENT)
5484 else:
5485 self.fail("Expected ENOENT because key does not exist")
5486
5487 # remove metadata again for the already removed key, this time with --force.
5488 try:
5489 self._fs_cmd("subvolume", "snapshot", "metadata", "rm", self.volname, subvolname, snapshot, key, group, "--force")
5490 except CommandFailedError:
5491 self.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
5492
5493 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5494 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5495 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5496
5497 # verify trash dir is clean.
5498 self._wait_for_trash_empty()
5499
5500 def test_subvolume_snapshot_metadata_after_snapshot_remove(self):
5501 """
5502 Verify metadata removal of subvolume snapshot after snapshot removal.
5503 """
5504 subvolname = self._generate_random_subvolume_name()
5505 group = self._generate_random_group_name()
5506 snapshot = self._generate_random_snapshot_name()
5507
5508 # create group.
5509 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5510
5511 # create subvolume in group.
5512 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5513
5514 # snapshot subvolume
5515 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5516
5517 # set metadata for snapshot.
5518 key = "key"
5519 value = "value"
5520 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5521
5522 # get value for specified key.
5523 ret = self._fs_cmd("subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group)
5524
5525 # remove '\n' from returned value.
5526 ret = ret.strip('\n')
5527
5528 # match received value with expected value.
5529 self.assertEqual(value, ret)
5530
5531 # remove subvolume snapshot.
5532 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5533
5534 # try to get metadata after removing the snapshot.
5535 # Expect ENOENT, with an error message saying the snapshot does not exist
5536 cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
5537 args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
5538 check_status=False, stdout=StringIO(), stderr=StringIO())
5539 self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
5540 self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
5541 f"Expecting message: snapshot '{snapshot}' does not exist ")
5542
5543 # confirm metadata is removed by searching section name in .meta file
5544 meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
5545 section_name = "SNAP_METADATA_" + snapshot
5546
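# (the .meta file is an INI-style config maintained by the mgr/volumes
# plugin; per-snapshot user metadata is assumed to live under a
# [SNAP_METADATA_<snapshot>] section, so grepping for the section header
# is enough to prove the whole section is gone)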
5547 try:
5548 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5549 except CommandFailedError as e:
5550 self.assertNotEqual(e.exitstatus, 0)
5551 else:
5552 self.fail("Expected non-zero exit status because section should not exist")
5553
5554 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5555 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5556
5557 # verify trash dir is clean.
5558 self._wait_for_trash_empty()
5559
5560 def test_clean_stale_subvolume_snapshot_metadata(self):
5561 """
5562 Validate cleaning of stale subvolume snapshot metadata.
5563 """
5564 subvolname = self._generate_random_subvolume_name()
5565 group = self._generate_random_group_name()
5566 snapshot = self._generate_random_snapshot_name()
5567
5568 # create group.
5569 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5570
5571 # create subvolume in group.
5572 self._fs_cmd("subvolume", "create", self.volname, subvolname, group)
5573
5574 # snapshot subvolume
5575 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot, group)
5576
5577 # set metadata for snapshot.
5578 key = "key"
5579 value = "value"
5580 try:
5581 self._fs_cmd("subvolume", "snapshot", "metadata", "set", self.volname, subvolname, snapshot, key, value, group)
5582 except CommandFailedError:
5583 self.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5584
5585 # save the subvolume config file.
5586 meta_path = os.path.join(".", "volumes", group, subvolname, ".meta")
5587 tmp_meta_path = os.path.join(".", "volumes", group, subvolname, ".meta.stale_snap_section")
5588 self.mount_a.run_shell(['sudo', 'cp', '-p', meta_path, tmp_meta_path], omit_sudo=False)
5589
5590 # Delete the snapshot; this removes its user snap metadata
5591 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot, group)
5592
5593 # Copy back the saved subvolume config file. It still carries the now-stale snapshot metadata
5594 self.mount_a.run_shell(['sudo', 'cp', '-p', tmp_meta_path, meta_path], omit_sudo=False)
5595
5596 # Verify that it has stale snapshot metadata
5597 section_name = "SNAP_METADATA_" + snapshot
5598 try:
5599 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5600 except CommandFailedError:
5601 self.fail("Expected grep cmd to succeed because stale snapshot metadata exists")
5602
5603 # Do any subvolume operation to clean the stale snapshot metadata
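# ('subvolume info' is just a representative choice here -- the working
# assumption is that any operation which loads the .meta config will
# detect and drop stale SNAP_METADATA sections)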
5604 _ = json.loads(self._get_subvolume_info(self.volname, subvolname, group))
5605
5606 # Verify that the stale snapshot metadata is cleaned
5607 try:
5608 self.mount_a.run_shell(f"sudo grep {section_name} {meta_path}", omit_sudo=False)
5609 except CommandFailedError as e:
5610 self.assertNotEqual(e.exitstatus, 0)
5611 else:
5612 self.fail("Expected non-zero exit status because stale snapshot metadata should not exist")
5613
5614 self._fs_cmd("subvolume", "rm", self.volname, subvolname, group)
5615 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5616
5617 # verify trash dir is clean.
5618 self._wait_for_trash_empty()
5619 # Clean tmp config file
5620 self.mount_a.run_shell(['sudo', 'rm', '-f', tmp_meta_path], omit_sudo=False)
5621
5622
5623 class TestSubvolumeSnapshotClones(TestVolumesHelper):
5624 """ Tests for FS subvolume snapshot clone operations."""
5625 def test_clone_subvolume_info(self):
5626 # tests the 'fs subvolume info' command for a clone
5627 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
5628 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
5629 "type", "uid"]
5630
5631 subvolume = self._generate_random_subvolume_name()
5632 snapshot = self._generate_random_snapshot_name()
5633 clone = self._generate_random_clone_name()
5634
5635 # create subvolume
5636 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5637
5638 # do some IO
5639 self._do_subvolume_io(subvolume, number_of_files=1)
5640
5641 # snapshot subvolume
5642 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5643
5644 # schedule a clone
5645 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5646
5647 # check clone status
5648 self._wait_for_clone_to_complete(clone)
5649
5650 # remove snapshot
5651 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5652
5653 subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
5654 if len(subvol_info) == 0:
5655 raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
5656 for md in subvol_md:
5657 if md not in subvol_info.keys():
5658 raise RuntimeError("%s not present in the metadata of subvolume" % md)
5659 if subvol_info["type"] != "clone":
5660 raise RuntimeError("type should be set to clone")
5661
5662 # remove subvolumes
5663 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5664 self._fs_cmd("subvolume", "rm", self.volname, clone)
5665
5666 # verify trash dir is clean
5667 self._wait_for_trash_empty()
5668
5669 def test_subvolume_snapshot_info_without_snapshot_clone(self):
5670 """
5671 Verify subvolume snapshot info output without cloning snapshot.
5672 If no clone is performed, then the path /volumes/_index/clone/{track_id}
5673 will not exist.
5674 """
5675 subvolume = self._generate_random_subvolume_name()
5676 snapshot = self._generate_random_snapshot_name()
5677
5678 # create subvolume.
5679 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5680
5681 # snapshot subvolume
5682 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5683
5684 # list snapshot info
5685 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5686
5687 # verify snapshot info
5688 self.assertEqual(result['has_pending_clones'], "no")
5689 self.assertFalse('orphan_clones_count' in result)
5690 self.assertFalse('pending_clones' in result)
5691
5692 # remove snapshot and subvolume
5693 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5694 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5695
5696 # verify trash dir is clean
5697 self._wait_for_trash_empty()
5698
5699 def test_subvolume_snapshot_info_if_no_clone_pending(self):
5700 """
5701 Verify subvolume snapshot info output if no clone is in pending state.
5702 """
5703 subvolume = self._generate_random_subvolume_name()
5704 snapshot = self._generate_random_snapshot_name()
5705 clone_list = [f'clone_{i}' for i in range(3)]
5706
5707 # create subvolume.
5708 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5709
5710 # snapshot subvolume
5711 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5712
5713 # schedule clones
5714 for clone in clone_list:
5715 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5716
5717 # check clone statuses
5718 for clone in clone_list:
5719 self._wait_for_clone_to_complete(clone)
5720
5721 # list snapshot info
5722 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5723
5724 # verify snapshot info
5725 self.assertEqual(result['has_pending_clones'], "no")
5726 self.assertFalse('orphan_clones_count' in result)
5727 self.assertFalse('pending_clones' in result)
5728
5729 # remove snapshot, subvolume and clones
5730 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5731 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5732 for clone in clone_list:
5733 self._fs_cmd("subvolume", "rm", self.volname, clone)
5734
5735 # verify trash dir is clean
5736 self._wait_for_trash_empty()
5737
5738 def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self):
5739 """
5740 Verify subvolume snapshot info output if clones are in pending state.
5741 Clones are not scheduled with a particular target_group. Hence target_group
5742 should not be in the output, as we don't show _nogroup (the default group).
5743 """
5744 subvolume = self._generate_random_subvolume_name()
5745 snapshot = self._generate_random_snapshot_name()
5746 clone_list = [f'clone_{i}' for i in range(3)]
5747
5748 # create subvolume.
5749 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5750
5751 # snapshot subvolume
5752 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5753
5754 # insert delay at the beginning of snapshot clone
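# (snapshot_clone_delay makes each cloner thread wait this many seconds
# before it starts copying, keeping the clones below in 'pending' long
# enough for the snapshot info assertions to observe them)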
5755 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5756
5757 # schedule clones
5758 for clone in clone_list:
5759 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5760
5761 # list snapshot info
5762 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5763
5764 # verify snapshot info
5765 expected_clone_list = []
5766 for clone in clone_list:
5767 expected_clone_list.append({"name": clone})
5768 self.assertEqual(result['has_pending_clones'], "yes")
5769 self.assertFalse('orphan_clones_count' in result)
5770 self.assertListEqual(result['pending_clones'], expected_clone_list)
5771 self.assertEqual(len(result['pending_clones']), 3)
5772
5773 # check clone statuses
5774 for clone in clone_list:
5775 self._wait_for_clone_to_complete(clone)
5776
5777 # remove snapshot, subvolume and clones
5778 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5779 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5780 for clone in clone_list:
5781 self._fs_cmd("subvolume", "rm", self.volname, clone)
5782
5783 # verify trash dir is clean
5784 self._wait_for_trash_empty()
5785
5786 def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self):
5787 """
5788 Verify subvolume snapshot info output if clones are in pending state.
5789 Clones are scheduled with a target_group, which should appear in the output.
5790 """
5791 subvolume = self._generate_random_subvolume_name()
5792 snapshot = self._generate_random_snapshot_name()
5793 clone = self._generate_random_clone_name()
5794 group = self._generate_random_group_name()
5795 target_group = self._generate_random_group_name()
5796
5797 # create groups
5798 self._fs_cmd("subvolumegroup", "create", self.volname, group)
5799 self._fs_cmd("subvolumegroup", "create", self.volname, target_group)
5800
5801 # create subvolume
5802 self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")
5803
5804 # snapshot subvolume
5805 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
5806
5807 # insert delay at the beginning of snapshot clone
5808 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5809
5810 # schedule a clone
5811 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
5812 "--group_name", group, "--target_group_name", target_group)
5813
5814 # list snapshot info
5815 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot, "--group_name", group))
5816
5817 # verify snapshot info
5818 expected_clone_list = [{"name": clone, "target_group": target_group}]
5819 self.assertEqual(result['has_pending_clones'], "yes")
5820 self.assertFalse('orphan_clones_count' in result)
5821 self.assertListEqual(result['pending_clones'], expected_clone_list)
5822 self.assertEqual(len(result['pending_clones']), 1)
5823
5824 # check clone status
5825 self._wait_for_clone_to_complete(clone, clone_group=target_group)
5826
5827 # remove snapshot
5828 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
5829
5830 # remove subvolumes
5831 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
5832 self._fs_cmd("subvolume", "rm", self.volname, clone, target_group)
5833
5834 # remove groups
5835 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
5836 self._fs_cmd("subvolumegroup", "rm", self.volname, target_group)
5837
5838 # verify trash dir is clean
5839 self._wait_for_trash_empty()
5840
5841 def test_subvolume_snapshot_info_if_orphan_clone(self):
5842 """
5843 Verify subvolume snapshot info output if orphan clones exist.
5844 Orphan clones should not be listed under pending clones.
5845 orphan_clones_count should display the correct count of orphan clones.
5846 """
5847 subvolume = self._generate_random_subvolume_name()
5848 snapshot = self._generate_random_snapshot_name()
5849 clone_list = [f'clone_{i}' for i in range(3)]
5850
5851 # create subvolume.
5852 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
5853
5854 # snapshot subvolume
5855 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5856
5857 # insert delay at the beginning of snapshot clone
5858 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 15)
5859
5860 # schedule clones
5861 for clone in clone_list:
5862 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5863
5864 # remove the track file for the third clone to make it an orphan
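# (each pending clone is tracked by an entry under /volumes/_index/clone
# named after its track id; deleting that entry severs the link between
# the snapshot and the clone, which is what turns the clone into an orphan)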
5865 meta_path = os.path.join(".", "volumes", "_nogroup", subvolume, ".meta")
5866 pending_clones_result = self.mount_a.run_shell(['sudo', 'grep', 'clone snaps', '-A3', meta_path], omit_sudo=False, stdout=StringIO(), stderr=StringIO())
5867 third_clone_track_id = pending_clones_result.stdout.getvalue().splitlines()[3].split(" = ")[0]
5868 third_clone_track_path = os.path.join(".", "volumes", "_index", "clone", third_clone_track_id)
5869 self.mount_a.run_shell(f"sudo rm -f {third_clone_track_path}", omit_sudo=False)
5870
5871 # list snapshot info
5872 result = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5873
5874 # verify snapshot info
5875 expected_clone_list = []
5876 for i in range(len(clone_list)-1):
5877 expected_clone_list.append({"name": clone_list[i]})
5878 self.assertEqual(result['has_pending_clones'], "yes")
5879 self.assertEqual(result['orphan_clones_count'], 1)
5880 self.assertListEqual(result['pending_clones'], expected_clone_list)
5881 self.assertEqual(len(result['pending_clones']), 2)
5882
5883 # check clone statuses
5884 for i in range(len(clone_list)-1):
5885 self._wait_for_clone_to_complete(clone_list[i])
5886
5887 # list snapshot info after cloning completion
5888 res = json.loads(self._fs_cmd("subvolume", "snapshot", "info", self.volname, subvolume, snapshot))
5889
5890 # verify snapshot info (has_pending_clones should be no)
5891 self.assertEqual(res['has_pending_clones'], "no")
5892
5893 def test_non_clone_status(self):
5894 subvolume = self._generate_random_subvolume_name()
5895
5896 # create subvolume
5897 self._fs_cmd("subvolume", "create", self.volname, subvolume)
5898
5899 try:
5900 self._fs_cmd("clone", "status", self.volname, subvolume)
5901 except CommandFailedError as ce:
5902 if ce.exitstatus != errno.ENOTSUP:
5903 raise RuntimeError("invalid error code when fetching status of a non-cloned subvolume")
5904 else:
5905 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
5906
5907 # remove subvolume
5908 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5909
5910 # verify trash dir is clean
5911 self._wait_for_trash_empty()
5912
5913 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self):
5914 subvolume = self._generate_random_subvolume_name()
5915 snapshot = self._generate_random_snapshot_name()
5916 clone = self._generate_random_clone_name()
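# 12 MiB quota: DEFAULT_FILE_SIZE is 1 (MB), so osize is expressed in bytes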
5917 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
5918
5919 # create subvolume, in an isolated namespace with a specified size
5920 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")
5921
5922 # do some IO
5923 self._do_subvolume_io(subvolume, number_of_files=8)
5924
5925 # snapshot subvolume
5926 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5927
5928 # create a pool different from current subvolume pool
5929 subvol_path = self._get_subvolume_path(self.volname, subvolume)
5930 default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
5931 new_pool = "new_pool"
5932 self.assertNotEqual(default_pool, new_pool)
5933 self.fs.add_data_pool(new_pool)
5934
5935 # update source subvolume pool
5936 self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="")
5937
5938 # schedule a clone, with NO --pool specification
5939 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5940
5941 # check clone status
5942 self._wait_for_clone_to_complete(clone)
5943
5944 # verify clone
5945 self._verify_clone(subvolume, snapshot, clone)
5946
5947 # remove snapshot
5948 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5949
5950 # remove subvolumes
5951 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
5952 self._fs_cmd("subvolume", "rm", self.volname, clone)
5953
5954 # verify trash dir is clean
5955 self._wait_for_trash_empty()
5956
5957 def test_subvolume_clone_inherit_quota_attrs(self):
5958 subvolume = self._generate_random_subvolume_name()
5959 snapshot = self._generate_random_snapshot_name()
5960 clone = self._generate_random_clone_name()
5961 osize = self.DEFAULT_FILE_SIZE*1024*1024*12
5962
5963 # create subvolume with a specified size
5964 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777", "--size", str(osize))
5965
5966 # do some IO
5967 self._do_subvolume_io(subvolume, number_of_files=8)
5968
5969 # get subvolume path
5970 subvolpath = self._get_subvolume_path(self.volname, subvolume)
5971
5972 # set quota on number of files
5973 self.mount_a.setfattr(subvolpath, 'ceph.quota.max_files', "20", sudo=True)
5974
5975 # snapshot subvolume
5976 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
5977
5978 # schedule a clone
5979 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
5980
5981 # check clone status
5982 self._wait_for_clone_to_complete(clone)
5983
5984 # verify clone
5985 self._verify_clone(subvolume, snapshot, clone)
5986
5987 # get subvolume path
5988 clonepath = self._get_subvolume_path(self.volname, clone)
5989
5990 # verify quota max_files is inherited from source snapshot
5991 subvol_quota = self.mount_a.getfattr(subvolpath, "ceph.quota.max_files")
5992 clone_quota = self.mount_a.getfattr(clonepath, "ceph.quota.max_files")
5993 self.assertEqual(subvol_quota, clone_quota)
5994
5995 # remove snapshot
5996 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
5997
5998 # remove subvolumes
5999 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6000 self._fs_cmd("subvolume", "rm", self.volname, clone)
6001
6002 # verify trash dir is clean
6003 self._wait_for_trash_empty()
6004
6005 def test_subvolume_clone_in_progress_getpath(self):
6006 subvolume = self._generate_random_subvolume_name()
6007 snapshot = self._generate_random_snapshot_name()
6008 clone = self._generate_random_clone_name()
6009
6010 # create subvolume
6011 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6012
6013 # do some IO
6014 self._do_subvolume_io(subvolume, number_of_files=64)
6015
6016 # snapshot subvolume
6017 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6018
6019 # Insert delay at the beginning of snapshot clone
6020 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6021
6022 # schedule a clone
6023 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6024
6025 # clone should not be accessible right now
6026 try:
6027 self._get_subvolume_path(self.volname, clone)
6028 except CommandFailedError as ce:
6029 if ce.exitstatus != errno.EAGAIN:
6030 raise RuntimeError("invalid error code when fetching path of a pending clone")
6031 else:
6032 raise RuntimeError("expected fetching path of a pending clone to fail")
6033
6034 # check clone status
6035 self._wait_for_clone_to_complete(clone)
6036
6037 # clone should be accessible now
6038 subvolpath = self._get_subvolume_path(self.volname, clone)
6039 self.assertNotEqual(subvolpath, None)
6040
6041 # verify clone
6042 self._verify_clone(subvolume, snapshot, clone)
6043
6044 # remove snapshot
6045 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6046
6047 # remove subvolumes
6048 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6049 self._fs_cmd("subvolume", "rm", self.volname, clone)
6050
6051 # verify trash dir is clean
6052 self._wait_for_trash_empty()
6053
6054 def test_subvolume_clone_in_progress_snapshot_rm(self):
6055 subvolume = self._generate_random_subvolume_name()
6056 snapshot = self._generate_random_snapshot_name()
6057 clone = self._generate_random_clone_name()
6058
6059 # create subvolume
6060 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6061
6062 # do some IO
6063 self._do_subvolume_io(subvolume, number_of_files=64)
6064
6065 # snapshot subvolume
6066 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6067
6068 # Insert delay at the beginning of snapshot clone
6069 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6070
6071 # schedule a clone
6072 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6073
6074 # snapshot should not be deletable now
6075 try:
6076 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6077 except CommandFailedError as ce:
6078 self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
6079 else:
6080 self.fail("expected removing source snapshot of a clone to fail")
6081
6082 # check clone status
6083 self._wait_for_clone_to_complete(clone)
6084
6085 # clone should be accessible now
6086 subvolpath = self._get_subvolume_path(self.volname, clone)
6087 self.assertNotEqual(subvolpath, None)
6088
6089 # verify clone
6090 self._verify_clone(subvolume, snapshot, clone)
6091
6092 # remove snapshot
6093 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6094
6095 # remove subvolumes
6096 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6097 self._fs_cmd("subvolume", "rm", self.volname, clone)
6098
6099 # verify trash dir is clean
6100 self._wait_for_trash_empty()
6101
6102 def test_subvolume_clone_in_progress_source(self):
6103 subvolume = self._generate_random_subvolume_name()
6104 snapshot = self._generate_random_snapshot_name()
6105 clone = self._generate_random_clone_name()
6106
6107 # create subvolume
6108 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6109
6110 # do some IO
6111 self._do_subvolume_io(subvolume, number_of_files=64)
6112
6113 # snapshot subvolume
6114 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6115
6116 # Insert delay at the beginning of snapshot clone
6117 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6118
6119 # schedule a clone
6120 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6121
6122 # verify clone source
6123 result = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
6124 source = result['status']['source']
6125 self.assertEqual(source['volume'], self.volname)
6126 self.assertEqual(source['subvolume'], subvolume)
6127 self.assertEqual(source.get('group', None), None)
6128 self.assertEqual(source['snapshot'], snapshot)
6129
6130 # check clone status
6131 self._wait_for_clone_to_complete(clone)
6132
6133 # clone should be accessible now
6134 subvolpath = self._get_subvolume_path(self.volname, clone)
6135 self.assertNotEqual(subvolpath, None)
6136
6137 # verify clone
6138 self._verify_clone(subvolume, snapshot, clone)
6139
6140 # remove snapshot
6141 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6142
6143 # remove subvolumes
6144 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6145 self._fs_cmd("subvolume", "rm", self.volname, clone)
6146
6147 # verify trash dir is clean
6148 self._wait_for_trash_empty()
6149
6150 def test_subvolume_clone_retain_snapshot_with_snapshots(self):
6151 """
6152 retain snapshots of a cloned subvolume and check disallowed operations
6153 """
6154 subvolume = self._generate_random_subvolume_name()
6155 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
6156 clone = self._generate_random_clone_name()
6157
6158 # create subvolume
6159 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6160
6161 # store path for clone verification
6162 subvol1_path = self._get_subvolume_path(self.volname, subvolume)
6163
6164 # do some IO
6165 self._do_subvolume_io(subvolume, number_of_files=16)
6166
6167 # snapshot subvolume
6168 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
6169
6170 # remove with snapshot retention
6171 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6172
6173 # clone retained subvolume snapshot
6174 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)
6175
6176 # check clone status
6177 self._wait_for_clone_to_complete(clone)
6178
6179 # verify clone
6180 self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path)
6181
6182 # create a snapshot on the clone
6183 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)
6184
6185 # retain a clone
6186 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
6187
6188 # list snapshots
6189 clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
6190 self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
6191 " created subvolume snapshots")
6192 snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
6193 for snap in [snapshot2]:
6194 self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
6195
6196 ## check disallowed operations on retained clone
6197 # clone-status
6198 try:
6199 self._fs_cmd("clone", "status", self.volname, clone)
6200 except CommandFailedError as ce:
6201 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
6202 else:
6203 self.fail("expected clone status of clone with retained snapshots to fail")
6204
6205 # clone-cancel
6206 try:
6207 self._fs_cmd("clone", "cancel", self.volname, clone)
6208 except CommandFailedError as ce:
6209 self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
6210 else:
6211 self.fail("expected clone cancel of clone with retained snapshots to fail")
6212
6213 # remove snapshots (removes subvolumes as all are in retained state)
6214 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
6215 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)
6216
6217 # verify list subvolumes returns an empty list
6218 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6219 self.assertEqual(len(subvolumels), 0)
6220
6221 # verify trash dir is clean
6222 self._wait_for_trash_empty()
6223
6224 def test_subvolume_retain_snapshot_clone(self):
6225 """
6226 clone a snapshot from a snapshot retained subvolume
6227 """
6228 subvolume = self._generate_random_subvolume_name()
6229 snapshot = self._generate_random_snapshot_name()
6230 clone = self._generate_random_clone_name()
6231
6232 # create subvolume
6233 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6234
6235 # store path for clone verification
6236 subvol_path = self._get_subvolume_path(self.volname, subvolume)
6237
6238 # do some IO
6239 self._do_subvolume_io(subvolume, number_of_files=16)
6240
6241 # snapshot subvolume
6242 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6243
6244 # remove with snapshot retention
6245 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6246
6247 # clone retained subvolume snapshot
6248 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6249
6250 # check clone status
6251 self._wait_for_clone_to_complete(clone)
6252
6253 # verify clone
6254 self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path)
6255
6256 # remove snapshots (removes retained volume)
6257 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6258
6259 # remove subvolume
6260 self._fs_cmd("subvolume", "rm", self.volname, clone)
6261
6262 # verify list subvolumes returns an empty list
6263 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6264 self.assertEqual(len(subvolumels), 0)
6265
6266 # verify trash dir is clean
6267 self._wait_for_trash_empty()
6268
6269 def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
6270 """
6271 clone a subvolume from recreated subvolume's latest snapshot
6272 """
6273 subvolume = self._generate_random_subvolume_name()
6274 snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
6275 clone = self._generate_random_clone_name(1)
6276
6277 # create subvolume
6278 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6279
6280 # do some IO
6281 self._do_subvolume_io(subvolume, number_of_files=16)
6282
6283 # snapshot subvolume
6284 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
6285
6286 # remove with snapshot retention
6287 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6288
6289 # recreate subvolume
6290 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6291
6292 # get and store path for clone verification
6293 subvol2_path = self._get_subvolume_path(self.volname, subvolume)
6294
6295 # do some IO
6296 self._do_subvolume_io(subvolume, number_of_files=16)
6297
6298 # snapshot newer subvolume
6299 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
6300
6301 # remove with snapshot retention
6302 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6303
6304 # clone retained subvolume's newer snapshot
6305 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)
6306
6307 # check clone status
6308 self._wait_for_clone_to_complete(clone)
6309
6310 # verify clone
6311 self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path)
6312
6313 # remove snapshots
6314 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
6315 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
6316
6317 # remove subvolume
6318 self._fs_cmd("subvolume", "rm", self.volname, clone)
6319
6320 # verify list subvolumes returns an empty list
6321 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6322 self.assertEqual(len(subvolumels), 0)
6323
6324 # verify trash dir is clean
6325 self._wait_for_trash_empty()
6326
6327 def test_subvolume_retain_snapshot_recreate(self):
6328 """
6329 recreate a subvolume from one of its retained snapshots
6330 """
6331 subvolume = self._generate_random_subvolume_name()
6332 snapshot = self._generate_random_snapshot_name()
6333
6334 # create subvolume
6335 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6336
6337 # store path for clone verification
6338 subvol_path = self._get_subvolume_path(self.volname, subvolume)
6339
6340 # do some IO
6341 self._do_subvolume_io(subvolume, number_of_files=16)
6342
6343 # snapshot subvolume
6344 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6345
6346 # remove with snapshot retention
6347 self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
6348
6349 # recreate retained subvolume using its own snapshot to clone
6350 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
6351
6352 # check clone status
6353 self._wait_for_clone_to_complete(subvolume)
6354
6355 # verify clone
6356 self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path)
6357
6358 # remove snapshot
6359 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6360
6361 # remove subvolume
6362 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6363
6364 # verify list subvolumes returns an empty list
6365 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
6366 self.assertEqual(len(subvolumels), 0)
6367
6368 # verify trash dir is clean
6369 self._wait_for_trash_empty()
6370
6371 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self):
6372 """
6373 ensure retained clone recreate fails if its trash is not yet purged
6374 """
6375 subvolume = self._generate_random_subvolume_name()
6376 snapshot = self._generate_random_snapshot_name()
6377 clone = self._generate_random_clone_name()
6378
6379 # create subvolume
6380 self._fs_cmd("subvolume", "create", self.volname, subvolume)
6381
6382 # snapshot subvolume
6383 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6384
6385 # clone subvolume snapshot
6386 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6387
6388 # check clone status
6389 self._wait_for_clone_to_complete(clone)
6390
6391 # snapshot clone
6392 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
6393
6394 # remove clone with snapshot retention
6395 self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
6396
6397 # fake a trash entry
6398 self._update_fake_trash(clone)
6399
6400 # clone subvolume snapshot (recreate)
6401 try:
6402 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6403 except CommandFailedError as ce:
6404 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
6405 else:
6406 self.fail("expected recreate of clone with purge pending to fail")
6407
6408 # clear fake trash entry
6409 self._update_fake_trash(clone, create=False)
6410
6411 # recreate subvolume
6412 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6413
6414 # check clone status
6415 self._wait_for_clone_to_complete(clone)
6416
6417 # remove snapshots
6418 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6419 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
6420
6421 # remove subvolumes
6422 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6423 self._fs_cmd("subvolume", "rm", self.volname, clone)
6424
6425 # verify trash dir is clean
6426 self._wait_for_trash_empty()
6427
6428 def test_subvolume_snapshot_attr_clone(self):
6429 subvolume = self._generate_random_subvolume_name()
6430 snapshot = self._generate_random_snapshot_name()
6431 clone = self._generate_random_clone_name()
6432
6433 # create subvolume
6434 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6435
6436 # do some IO
6437 self._do_subvolume_io_mixed(subvolume)
6438
6439 # snapshot subvolume
6440 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6441
6442 # schedule a clone
6443 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6444
6445 # check clone status
6446 self._wait_for_clone_to_complete(clone)
6447
6448 # verify clone
6449 self._verify_clone(subvolume, snapshot, clone)
6450
6451 # remove snapshot
6452 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6453
6454 # remove subvolumes
6455 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6456 self._fs_cmd("subvolume", "rm", self.volname, clone)
6457
6458 # verify trash dir is clean
6459 self._wait_for_trash_empty()
6460
6461 def test_clone_failure_status_pending_in_progress_complete(self):
6462 """
6463 ensure failure status is not shown when clone is not in failed/cancelled state
6464 """
6465 subvolume = self._generate_random_subvolume_name()
6466 snapshot = self._generate_random_snapshot_name()
6467 clone1 = self._generate_random_clone_name()
6468
6469 # create subvolume
6470 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6471
6472 # do some IO
6473 self._do_subvolume_io(subvolume, number_of_files=200)
6474
6475 # snapshot subvolume
6476 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6477
6478 # Insert delay at the beginning of snapshot clone
6479 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6480
6481 # schedule clone1
6482 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6483
6484 # pending clone shouldn't show failure status
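# (a clone that has not failed carries no 'failure' section in its status;
# indexing it should raise KeyError, and str() of a KeyError is the missing
# key wrapped in quotes -- hence the "'failure'" comparison below)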
6485 clone1_result = self._get_clone_status(clone1)
6486 try:
6487 clone1_result["status"]["failure"]["errno"]
6488 except KeyError as e:
6489 self.assertEqual(str(e), "'failure'")
6490 else:
6491 self.fail("clone status shouldn't show failure for pending clone")
6492
6493 # check clone1 to be in-progress
6494 self._wait_for_clone_to_be_in_progress(clone1)
6495
6496 # in-progress clone1 shouldn't show failure status
6497 clone1_result = self._get_clone_status(clone1)
6498 try:
6499 clone1_result["status"]["failure"]["errno"]
6500 except KeyError as e:
6501 self.assertEqual(str(e), "'failure'")
6502 else:
6503 self.fail("clone status shouldn't show failure for in-progress clone")
6504
6505 # wait for clone1 to complete
6506 self._wait_for_clone_to_complete(clone1)
6507
6508 # complete clone1 shouldn't show failure status
6509 clone1_result = self._get_clone_status(clone1)
6510 try:
6511 clone1_result["status"]["failure"]["errno"]
6512 except KeyError as e:
6513 self.assertEqual(str(e), "'failure'")
6514 else:
6515 self.fail("clone status shouldn't show failure for complete clone")
6516
6517 # remove snapshot
6518 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6519
6520 # remove subvolumes
6521 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6522 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6523
6524 # verify trash dir is clean
6525 self._wait_for_trash_empty()
6526
6527 def test_clone_failure_status_failed(self):
6528 """
6529 ensure failure status is shown when clone is in failed state and validate the reason
6530 """
6531 subvolume = self._generate_random_subvolume_name()
6532 snapshot = self._generate_random_snapshot_name()
6533 clone1 = self._generate_random_clone_name()
6534
6535 # create subvolume
6536 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6537
6538 # do some IO
6539 self._do_subvolume_io(subvolume, number_of_files=200)
6540
6541 # snapshot subvolume
6542 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6543
6544 # Insert delay at the beginning of snapshot clone
6545 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6546
6547 # schedule clone1
6548 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6549
6550 # remove snapshot from backend to force the clone failure.
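# (the source snapshot is backed by <subvolume>/.snap/<snapshot>; removing
# that directory out from under the cloner should make the copy fail with
# ENOENT, i.e. errno 2, matching the assertions below)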
6551 snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
6552 self.mount_a.run_shell(['sudo', 'rmdir', snappath], omit_sudo=False)
6553
6554 # wait for clone1 to fail.
6555 self._wait_for_clone_to_fail(clone1)
6556
6557 # check clone1 status
6558 clone1_result = self._get_clone_status(clone1)
6559 self.assertEqual(clone1_result["status"]["state"], "failed")
6560 self.assertEqual(clone1_result["status"]["failure"]["errno"], "2")
6561 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot))
6562
6563 # clone removal should succeed after failure, remove clone1
6564 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6565
6566 # remove subvolumes
6567 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6568
6569 # verify trash dir is clean
6570 self._wait_for_trash_empty()
6571
6572 def test_clone_failure_status_pending_cancelled(self):
6573 """
6574 ensure failure status is shown when clone is cancelled during pending state and validate the reason
6575 """
6576 subvolume = self._generate_random_subvolume_name()
6577 snapshot = self._generate_random_snapshot_name()
6578 clone1 = self._generate_random_clone_name()
6579
6580 # create subvolume
6581 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6582
6583 # do some IO
6584 self._do_subvolume_io(subvolume, number_of_files=200)
6585
6586 # snapshot subvolume
6587 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6588
6589 # Insert delay at the beginning of snapshot clone
6590 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6591
6592 # schedule clone1
6593 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6594
6595 # cancel pending clone1
6596 self._fs_cmd("clone", "cancel", self.volname, clone1)
6597
6598 # check clone1 status
6599 clone1_result = self._get_clone_status(clone1)
6600 self.assertEqual(clone1_result["status"]["state"], "canceled")
6601 self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
6602 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")
6603
6604 # clone removal should succeed with --force after cancellation; remove clone1
6605 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6606
6607 # remove snapshot
6608 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6609
6610 # remove subvolumes
6611 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6612
6613 # verify trash dir is clean
6614 self._wait_for_trash_empty()
6615
6616 def test_clone_failure_status_in_progress_cancelled(self):
6617 """
6618 ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
6619 """
6620 subvolume = self._generate_random_subvolume_name()
6621 snapshot = self._generate_random_snapshot_name()
6622 clone1 = self._generate_random_clone_name()
6623
6624 # create subvolume
6625 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6626
6627 # do some IO
6628 self._do_subvolume_io(subvolume, number_of_files=200)
6629
6630 # snapshot subvolume
6631 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6632
6633 # Insert delay at the beginning of snapshot clone
6634 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6635
6636 # schedule clone1
6637 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6638
6639 # wait for clone1 to be in-progress
6640 self._wait_for_clone_to_be_in_progress(clone1)
6641
6642 # cancel in-progress clone1
6643 self._fs_cmd("clone", "cancel", self.volname, clone1)
6644
6645 # check clone1 status
6646 clone1_result = self._get_clone_status(clone1)
6647 self.assertEqual(clone1_result["status"]["state"], "canceled")
6648 self.assertEqual(clone1_result["status"]["failure"]["errno"], "4")
6649 self.assertEqual(clone1_result["status"]["failure"]["error_msg"], "user interrupted clone operation")
6650
6651 # clone removal should succeed with --force after cancellation; remove clone1
6652 self._fs_cmd("subvolume", "rm", self.volname, clone1, "--force")
6653
6654 # remove snapshot
6655 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6656
6657 # remove subvolumes
6658 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6659
6660 # verify trash dir is clean
6661 self._wait_for_trash_empty()
6662
6663 def test_subvolume_snapshot_clone(self):
6664 subvolume = self._generate_random_subvolume_name()
6665 snapshot = self._generate_random_snapshot_name()
6666 clone = self._generate_random_clone_name()
6667
6668 # create subvolume
6669 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6670
6671 # do some IO
6672 self._do_subvolume_io(subvolume, number_of_files=64)
6673
6674 # snapshot subvolume
6675 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6676
6677 # schedule a clone
6678 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6679
6680 # check clone status
6681 self._wait_for_clone_to_complete(clone)
6682
6683 # verify clone
6684 self._verify_clone(subvolume, snapshot, clone)
6685
6686 # remove snapshot
6687 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6688
6689 # remove subvolumes
6690 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6691 self._fs_cmd("subvolume", "rm", self.volname, clone)
6692
6693 # verify trash dir is clean
6694 self._wait_for_trash_empty()
6695
6696 def test_subvolume_snapshot_clone_quota_exceeded(self):
6697 subvolume = self._generate_random_subvolume_name()
6698 snapshot = self._generate_random_snapshot_name()
6699 clone = self._generate_random_clone_name()
6700
6701 # create subvolume with 20MB quota
6702 osize = self.DEFAULT_FILE_SIZE*1024*1024*20
6703 self._fs_cmd("subvolume", "create", self.volname, subvolume,"--mode=777", "--size", str(osize))
6704
6705 # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
6706 try:
6707 self._do_subvolume_io(subvolume, number_of_files=50)
6708 except CommandFailedError:
6709 # ignore quota enforcement error.
6710 pass
6711
6712 # snapshot subvolume
6713 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6714
6715 # schedule a clone
6716 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6717
6718 # check clone status
6719 self._wait_for_clone_to_complete(clone)
6720
6721 # verify clone
6722 self._verify_clone(subvolume, snapshot, clone)
6723
6724 # remove snapshot
6725 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6726
6727 # remove subvolumes
6728 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6729 self._fs_cmd("subvolume", "rm", self.volname, clone)
6730
6731 # verify trash dir is clean
6732 self._wait_for_trash_empty()
6733
6734 def test_subvolume_snapshot_in_complete_clone_rm(self):
6735 """
6736 Validates the removal of clone when it is not in 'complete|cancelled|failed' state.
6737 The forceful removal of a subvolume clone succeeds only if it's in any of the
6738 'complete|cancelled|failed' states. It fails with EAGAIN in any other state.
6739 """
6740
6741 subvolume = self._generate_random_subvolume_name()
6742 snapshot = self._generate_random_snapshot_name()
6743 clone = self._generate_random_clone_name()
6744
6745 # create subvolume
6746 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6747
6748 # do some IO
6749 self._do_subvolume_io(subvolume, number_of_files=64)
6750
6751 # snapshot subvolume
6752 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6753
6754 # Insert delay at the beginning of snapshot clone
6755 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6756
6757 # schedule a clone
6758 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6759
6760 # Use --force since the clone is not complete. Returns EAGAIN as the clone is neither complete nor cancelled.
6761 try:
6762 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6763 except CommandFailedError as ce:
6764 if ce.exitstatus != errno.EAGAIN:
6765 raise RuntimeError("invalid error code when trying to remove an in-progress clone")
6766 else:
6767 raise RuntimeError("expected error when removing an in-progress clone")
6768
6769 # cancel on-going clone
6770 self._fs_cmd("clone", "cancel", self.volname, clone)
6771
6772 # verify canceled state
6773 self._check_clone_canceled(clone)
6774
6775 # clone removal should succeed after cancel
6776 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6777
6778 # remove snapshot
6779 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6780
6781 # remove subvolumes
6782 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6783
6784 # verify trash dir is clean
6785 self._wait_for_trash_empty()
6786
6787 def test_subvolume_snapshot_clone_retain_suid_guid(self):
6788 subvolume = self._generate_random_subvolume_name()
6789 snapshot = self._generate_random_snapshot_name()
6790 clone = self._generate_random_clone_name()
6791
6792 # create subvolume
6793 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6794
6795 # Create a file with suid, guid bits set along with executable bit.
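# (chmod u+sx,g+sx below sets the setuid and setgid bits along with the
# owner and group execute bits; clone verification compares inode modes,
# so these bits must survive the clone)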
6796 args = ["subvolume", "getpath", self.volname, subvolume]
6797 args = tuple(args)
6798 subvolpath = self._fs_cmd(*args)
6799 self.assertNotEqual(subvolpath, None)
6800 subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline
6801
6803 file_path = os.path.join(subvolpath, "test_suid_file")
6804 self.mount_a.run_shell(["touch", file_path])
6805 self.mount_a.run_shell(["chmod", "u+sx,g+sx", file_path])
6806
6807 # snapshot subvolume
6808 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6809
6810 # schedule a clone
6811 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6812
6813 # check clone status
6814 self._wait_for_clone_to_complete(clone)
6815
6816 # verify clone
6817 self._verify_clone(subvolume, snapshot, clone)
6818
6819 # remove snapshot
6820 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6821
6822 # remove subvolumes
6823 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6824 self._fs_cmd("subvolume", "rm", self.volname, clone)
6825
6826 # verify trash dir is clean
6827 self._wait_for_trash_empty()
6828
6829 def test_subvolume_snapshot_clone_and_reclone(self):
6830 subvolume = self._generate_random_subvolume_name()
6831 snapshot = self._generate_random_snapshot_name()
6832 clone1, clone2 = self._generate_random_clone_name(2)
6833
6834 # create subvolume
6835 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6836
6837 # do some IO
6838 self._do_subvolume_io(subvolume, number_of_files=32)
6839
6840 # snapshot subvolume
6841 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6842
6843 # schedule a clone
6844 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
6845
6846 # check clone status
6847 self._wait_for_clone_to_complete(clone1)
6848
6849 # verify clone
6850 self._verify_clone(subvolume, snapshot, clone1)
6851
6852 # remove snapshot
6853 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6854
6855 # now the clone is just like a normal subvolume -- snapshot the clone and fork
6856 # another clone. before that, do some IO so it can be differentiated.
6857 self._do_subvolume_io(clone1, create_dir="data", number_of_files=32)
6858
6859 # snapshot clone -- use same snap name
6860 self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot)
6861
6862 # schedule a clone
6863 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2)
6864
6865 # check clone status
6866 self._wait_for_clone_to_complete(clone2)
6867
6868 # verify clone
6869 self._verify_clone(clone1, snapshot, clone2)
6870
6871 # remove snapshot
6872 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot)
6873
6874 # remove subvolumes
6875 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6876 self._fs_cmd("subvolume", "rm", self.volname, clone1)
6877 self._fs_cmd("subvolume", "rm", self.volname, clone2)
6878
6879 # verify trash dir is clean
6880 self._wait_for_trash_empty()
6881
6882 def test_subvolume_snapshot_clone_cancel_in_progress(self):
6883 subvolume = self._generate_random_subvolume_name()
6884 snapshot = self._generate_random_snapshot_name()
6885 clone = self._generate_random_clone_name()
6886
6887 # create subvolume
6888 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6889
6890 # do some IO
6891 self._do_subvolume_io(subvolume, number_of_files=128)
6892
6893 # snapshot subvolume
6894 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6895
6896 # Insert delay at the beginning of snapshot clone
6897 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6898
6899 # schedule a clone
6900 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6901
6902 # cancel on-going clone
6903 self._fs_cmd("clone", "cancel", self.volname, clone)
6904
6905 # verify canceled state
6906 self._check_clone_canceled(clone)
6907
6908 # remove snapshot
6909 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6910
6911 # remove subvolumes
6912 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6913 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6914
6915 # verify trash dir is clean
6916 self._wait_for_trash_empty()
6917
6918 def test_subvolume_snapshot_clone_cancel_pending(self):
6919 """
6920 this test is a bit more involved compared to canceling an in-progress clone.
6921 we'd need to ensure that a to-be canceled clone has still not been picked up
6922 by cloner threads. exploit the fact that clones are picked up in an FCFS
6923 fashion and there are four (4) cloner threads by default. When the number of
6924 cloner threads increases, this test _may_ start tripping -- so, the number of
6925 clone operations would need to be jacked up.
6926 """
6927 # default number of clone threads
6928 NR_THREADS = 4
6929 # good enough for 4 threads
6930 NR_CLONES = 5
6931 # yeah, 1 gig -- we need the clone to run for some time
6932 FILE_SIZE_MB = 1024
6933
6934 subvolume = self._generate_random_subvolume_name()
6935 snapshot = self._generate_random_snapshot_name()
6936 clones = self._generate_random_clone_name(NR_CLONES)
6937
6938 # create subvolume
6939 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
6940
6941 # do some IO
6942 self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)
6943
6944 # snapshot subvolume
6945 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
6946
6947 # schedule clones
6948 for clone in clones:
6949 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
6950
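# with the default four cloner threads (see docstring), the first
# NR_THREADS clones should already have been picked up; the remainder are
# still pending and can therefore be cancelled deterministically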
6951 to_wait = clones[0:NR_THREADS]
6952 to_cancel = clones[NR_THREADS:]
6953
6954 # cancel pending clones and verify
6955 for clone in to_cancel:
6956 status = json.loads(self._fs_cmd("clone", "status", self.volname, clone))
6957 self.assertEqual(status["status"]["state"], "pending")
6958 self._fs_cmd("clone", "cancel", self.volname, clone)
6959 self._check_clone_canceled(clone)
6960
6961 # let's cancel the on-going clones. handle the case where some of the
6962 # clones have _just_ completed
6963 for clone in list(to_wait):
6964 try:
6965 self._fs_cmd("clone", "cancel", self.volname, clone)
6966 to_cancel.append(clone)
6967 to_wait.remove(clone)
6968 except CommandFailedError as ce:
6969 if ce.exitstatus != errno.EINVAL:
6970 raise RuntimeError("invalid error code when cancelling on-going clone")
6971
6972 # remove snapshot
6973 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
6974
6975 # remove subvolumes
6976 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
6977 for clone in to_wait:
6978 self._fs_cmd("subvolume", "rm", self.volname, clone)
6979 for clone in to_cancel:
6980 self._fs_cmd("subvolume", "rm", self.volname, clone, "--force")
6981
6982 # verify trash dir is clean
6983 self._wait_for_trash_empty()
6984
6985 def test_subvolume_snapshot_clone_different_groups(self):
6986 subvolume = self._generate_random_subvolume_name()
6987 snapshot = self._generate_random_snapshot_name()
6988 clone = self._generate_random_clone_name()
6989 s_group, c_group = self._generate_random_group_name(2)
6990
6991 # create groups
6992 self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
6993 self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
6994
6995 # create subvolume
6996 self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")
6997
6998 # do some IO
6999 self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
7000
7001 # snapshot subvolume
7002 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
7003
7004 # schedule a clone
7005 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
7006 '--group_name', s_group, '--target_group_name', c_group)
7007
7008 # check clone status
7009 self._wait_for_clone_to_complete(clone, clone_group=c_group)
7010
7011 # verify clone
7012 self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
7013
7014 # remove snapshot
7015 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
7016
7017 # remove subvolumes
7018 self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
7019 self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
7020
7021 # remove groups
7022 self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
7023 self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
7024
7025 # verify trash dir is clean
7026 self._wait_for_trash_empty()
7027
7028 def test_subvolume_snapshot_clone_fail_with_remove(self):
7029 subvolume = self._generate_random_subvolume_name()
7030 snapshot = self._generate_random_snapshot_name()
7031 clone1, clone2 = self._generate_random_clone_name(2)
7032
7033 pool_capacity = 32 * 1024 * 1024
7034 # number of files required to fill up 99% of the pool
7035 nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
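# i.e. int((32 * 1024 * 1024 * 0.99) / (1 * 1024 * 1024)) == 31 files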
7036
7037 # create subvolume
7038 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
7039
7040 # do some IO
7041 self._do_subvolume_io(subvolume, number_of_files=nr_files)
7042
7043 # snapshot subvolume
7044 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7045
7046 # add data pool
7047 new_pool = "new_pool"
7048 self.fs.add_data_pool(new_pool)
7049
7050 self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
7051 "max_bytes", "{0}".format(pool_capacity // 4))
7052
7053 # schedule a clone
7054 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
7055
7056 # check clone status -- this should dramatically overshoot the pool quota
7057 self._wait_for_clone_to_complete(clone1)
7058
7059 # verify clone
7060 self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool)
7061
7062 # wait a bit so that subsequent I/O hits a pool-full error
7063 time.sleep(120)
7064
7065 # schedule a clone
7066 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool)
7067
7068 # check clone status
7069 self._wait_for_clone_to_fail(clone2)
7070
7071 # remove snapshot
7072 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7073
7074 # remove subvolumes
7075 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7076 self._fs_cmd("subvolume", "rm", self.volname, clone1)
7077 try:
7078 self._fs_cmd("subvolume", "rm", self.volname, clone2)
7079 except CommandFailedError as ce:
7080 if ce.exitstatus != errno.EAGAIN:
7081 raise RuntimeError("invalid error code when trying to remove failed clone")
7082 else:
7083 raise RuntimeError("expected error when removing a failed clone")
7084
7085 # ... and with force, failed clone can be removed
7086 self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force")
7087
7088 # verify trash dir is clean
7089 self._wait_for_trash_empty()
7090
7091 def test_subvolume_snapshot_clone_on_existing_subvolumes(self):
7092 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7093 snapshot = self._generate_random_snapshot_name()
7094 clone = self._generate_random_clone_name()
7095
7096 # create subvolumes
7097 self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
7098 self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")
7099
7100 # do some IO
7101 self._do_subvolume_io(subvolume1, number_of_files=32)
7102
7103 # snapshot subvolume
7104 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot)
7105
7106 # schedule a clone with target as subvolume2
7107 try:
7108 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2)
7109 except CommandFailedError as ce:
7110 if ce.exitstatus != errno.EEXIST:
7111 raise RuntimeError("invalid error code when cloning to existing subvolume")
7112 else:
7113 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
7114
7115 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
7116
7117 # schedule a clone with target as clone
7118 try:
7119 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone)
7120 except CommandFailedError as ce:
7121 if ce.exitstatus != errno.EEXIST:
7122 raise RuntimeError("invalid error code when cloning to existing clone")
7123 else:
7124 raise RuntimeError("expected cloning to fail if the target is an existing clone")
7125
7126 # check clone status
7127 self._wait_for_clone_to_complete(clone)
7128
7129 # verify clone
7130 self._verify_clone(subvolume1, snapshot, clone)
7131
7132 # remove snapshot
7133 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot)
7134
7135 # remove subvolumes
7136 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7137 self._fs_cmd("subvolume", "rm", self.volname, subvolume2)
7138 self._fs_cmd("subvolume", "rm", self.volname, clone)
7139
7140 # verify trash dir is clean
7141 self._wait_for_trash_empty()
7142
7143 def test_subvolume_snapshot_clone_pool_layout(self):
7144 subvolume = self._generate_random_subvolume_name()
7145 snapshot = self._generate_random_snapshot_name()
7146 clone = self._generate_random_clone_name()
7147
7148 # add data pool
7149 new_pool = "new_pool"
7150 newid = self.fs.add_data_pool(new_pool)
7151
7152 # create subvolume
7153 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
7154
7155 # do some IO
7156 self._do_subvolume_io(subvolume, number_of_files=32)
7157
7158 # snapshot subvolume
7159 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7160
7161 # schedule a clone
7162 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool)
7163
7164 # check clone status
7165 self._wait_for_clone_to_complete(clone)
7166
7167 # verify clone
7168 self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool)
7169
7170 # remove snapshot
7171 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7172
7173 subvol_path = self._get_subvolume_path(self.volname, clone)
7174 desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool")
7175 try:
7176 self.assertEqual(desired_pool, new_pool)
7177 except AssertionError:
7178 self.assertEqual(int(desired_pool), newid) # old kernel returns id
7179
7180 # remove subvolumes
7181 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7182 self._fs_cmd("subvolume", "rm", self.volname, clone)
7183
7184 # verify trash dir is clean
7185 self._wait_for_trash_empty()
7186
7187 def test_subvolume_snapshot_clone_under_group(self):
7188 subvolume = self._generate_random_subvolume_name()
7189 snapshot = self._generate_random_snapshot_name()
7190 clone = self._generate_random_clone_name()
7191 group = self._generate_random_group_name()
7192
7193 # create subvolume
7194 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
7195
7196 # do some IO
7197 self._do_subvolume_io(subvolume, number_of_files=32)
7198
7199 # snapshot subvolume
7200 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7201
7202 # create group
7203 self._fs_cmd("subvolumegroup", "create", self.volname, group)
7204
7205 # schedule a clone
7206 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group)
7207
7208 # check clone status
7209 self._wait_for_clone_to_complete(clone, clone_group=group)
7210
7211 # verify clone
7212 self._verify_clone(subvolume, snapshot, clone, clone_group=group)
7213
7214 # remove snapshot
7215 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7216
7217 # remove subvolumes
7218 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7219 self._fs_cmd("subvolume", "rm", self.volname, clone, group)
7220
7221 # remove group
7222 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7223
7224 # verify trash dir is clean
7225 self._wait_for_trash_empty()
7226
7227 def test_subvolume_snapshot_clone_with_attrs(self):
7228 subvolume = self._generate_random_subvolume_name()
7229 snapshot = self._generate_random_snapshot_name()
7230 clone = self._generate_random_clone_name()
7231
7232 mode = "777"
7233 uid = "1000"
7234 gid = "1000"
7235 new_uid = "1001"
7236 new_gid = "1001"
7237 new_mode = "700"
7238
7239 # create subvolume
7240 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
7241
7242 # do some IO
7243 self._do_subvolume_io(subvolume, number_of_files=32)
7244
7245 # snapshot subvolume
7246 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7247
7248 # change subvolume attrs (to ensure clone picks up snapshot attrs)
7249 self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode)
7250
7251 # schedule a clone
7252 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
7253
7254 # check clone status
7255 self._wait_for_clone_to_complete(clone)
7256
7257 # verify clone
7258 self._verify_clone(subvolume, snapshot, clone)
7259
7260 # remove snapshot
7261 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7262
7263 # remove subvolumes
7264 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7265 self._fs_cmd("subvolume", "rm", self.volname, clone)
7266
7267 # verify trash dir is clean
7268 self._wait_for_trash_empty()
7269
7270 def test_subvolume_snapshot_clone_with_upgrade(self):
7271 """
7272 yet another poor man's upgrade test -- rather than going through a full
7273 upgrade cycle, emulate old-style subvolumes by going through the wormhole
7274 and verify the clone operation.
7275 further ensure that a legacy subvolume is not updated to v2, but the clone is.
7276 """
7277 subvolume = self._generate_random_subvolume_name()
7278 snapshot = self._generate_random_snapshot_name()
7279 clone = self._generate_random_clone_name()
7280
7281 # emulate an old-fashioned subvolume
7282 createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
7283 self.mount_a.run_shell_payload(f"sudo mkdir -p -m 777 {createpath}", omit_sudo=False)
7284
7285 # add required xattrs to subvolume
7286 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7287 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7288
7289 # do some IO
7290 self._do_subvolume_io(subvolume, number_of_files=64)
7291
7292 # snapshot subvolume
7293 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7294
7295 # ensure metadata file is in legacy location, with required version v1
7296 self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)
7297
7298 # Insert delay at the beginning of snapshot clone
7299 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7300
7301 # schedule a clone
7302 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
7303
7304 # snapshot should not be deletable now
7305 try:
7306 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7307 except CommandFailedError as ce:
7308 self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone")
7309 else:
7310 self.fail("expected removing source snapshot of a clone to fail")
7311
7312 # check clone status
7313 self._wait_for_clone_to_complete(clone)
7314
7315 # verify clone
7316 self._verify_clone(subvolume, snapshot, clone, source_version=1)
7317
7318 # remove snapshot
7319 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7320
7321 # ensure metadata file is in v2 location, with required version v2
7322 self._assert_meta_location_and_version(self.volname, clone)
7323
7324 # remove subvolumes
7325 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7326 self._fs_cmd("subvolume", "rm", self.volname, clone)
7327
7328 # verify trash dir is clean
7329 self._wait_for_trash_empty()
7330
7331 def test_subvolume_snapshot_reconf_max_concurrent_clones(self):
7332 """
7333 Validate 'max_concurrent_clones' config option
7334 """
7335
7336 # get the default number of cloner threads
7337 default_max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7338 self.assertEqual(default_max_concurrent_clones, 4)
7339
7340 # Increase number of cloner threads
7341 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
7342 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7343 self.assertEqual(max_concurrent_clones, 6)
7344
7345 # Decrease number of cloner threads
7346 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7347 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7348 self.assertEqual(max_concurrent_clones, 2)
7349
7350 def test_subvolume_snapshot_config_snapshot_clone_delay(self):
7351 """
7352 Validate 'snapshot_clone_delay' config option
7353 """
7354
7355 # get the default delay before starting the clone
7356 default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7357 self.assertEqual(default_timeout, 0)
7358
7359 # Insert delay of 2 seconds at the beginning of the snapshot clone
7360 self.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7361 default_timeout = int(self.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7362 self.assertEqual(default_timeout, 2)
7363
7364 # Decrease number of cloner threads
7365 self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7366 max_concurrent_clones = int(self.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7367 self.assertEqual(max_concurrent_clones, 2)
7368
7369 def test_subvolume_under_group_snapshot_clone(self):
7370 subvolume = self._generate_random_subvolume_name()
7371 group = self._generate_random_group_name()
7372 snapshot = self._generate_random_snapshot_name()
7373 clone = self._generate_random_clone_name()
7374
7375 # create group
7376 self._fs_cmd("subvolumegroup", "create", self.volname, group)
7377
7378 # create subvolume
7379 self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")
7380
7381 # do some IO
7382 self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)
7383
7384 # snapshot subvolume
7385 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group)
7386
7387 # schedule a clone
7388 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group)
7389
7390 # check clone status
7391 self._wait_for_clone_to_complete(clone)
7392
7393 # verify clone
7394 self._verify_clone(subvolume, snapshot, clone, source_group=group)
7395
7396 # remove snapshot
7397 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group)
7398
7399 # remove subvolumes
7400 self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
7401 self._fs_cmd("subvolume", "rm", self.volname, clone)
7402
7403 # remove group
7404 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7405
7406 # verify trash dir is clean
7407 self._wait_for_trash_empty()
7408
7409
7410 class TestMisc(TestVolumesHelper):
7411 """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
7412 def test_connection_expiration(self):
7413 # unmount any cephfs mounts
7414 for i in range(0, self.CLIENTS_REQUIRED):
7415 self.mounts[i].umount_wait()
7416 sessions = self._session_list()
7417 self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
7418
7419 # Get the mgr to definitely mount cephfs
7420 subvolume = self._generate_random_subvolume_name()
7421 self._fs_cmd("subvolume", "create", self.volname, subvolume)
7422 sessions = self._session_list()
7423 self.assertEqual(len(sessions), 1)
7424
7425 # Now wait for the mgr to expire the connection:
7426 self.wait_until_evicted(sessions[0]['id'], timeout=90)
7427
7428 def test_mgr_eviction(self):
7429 # unmount any cephfs mounts
7430 for i in range(0, self.CLIENTS_REQUIRED):
7431 self.mounts[i].umount_wait()
7432 sessions = self._session_list()
7433 self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted
7434
7435 # Get the mgr to definitely mount cephfs
7436 subvolume = self._generate_random_subvolume_name()
7437 self._fs_cmd("subvolume", "create", self.volname, subvolume)
7438 sessions = self._session_list()
7439 self.assertEqual(len(sessions), 1)
7440
7441 # Now fail the mgr, check the session was evicted
7442 mgr = self.mgr_cluster.get_active_id()
7443 self.mgr_cluster.mgr_fail(mgr)
7444 self.wait_until_evicted(sessions[0]['id'])
7445
7446 def test_names_can_only_be_goodchars(self):
7447 """
7448 Test that creating volumes, subvolumes and subvolume groups fails when
7449 their names use characters outside [a-zA-Z0-9 -_.].
7450 """
7451 volname, badname = 'testvol', 'abcd@#'
7452
7453 with self.assertRaises(CommandFailedError):
7454 self._fs_cmd('volume', 'create', badname)
7455 self._fs_cmd('volume', 'create', volname)
7456
7457 with self.assertRaises(CommandFailedError):
7458 self._fs_cmd('subvolumegroup', 'create', volname, badname)
7459
7460 with self.assertRaises(CommandFailedError):
7461 self._fs_cmd('subvolume', 'create', volname, badname)
7462 self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')
7463
7464 def test_subvolume_ops_on_nonexistent_vol(self):
7465 # tests fs subvolume operations on a non-existent volume
7466
7467 volname = "non_existent_subvolume"
7468
7469 # try subvolume operations
7470 for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
7471 try:
7472 if op == "resize":
7473 self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
7474 elif op == "pin":
7475 self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
7476 elif op == "ls":
7477 self._fs_cmd("subvolume", "ls", volname)
7478 else:
7479 self._fs_cmd("subvolume", op, volname, "subvolume_1")
7480 except CommandFailedError as ce:
7481 self.assertEqual(ce.exitstatus, errno.ENOENT)
7482 else:
7483 self.fail("expected the 'fs subvolume {0}' command to fail".format(op))
7484
7485 # try subvolume snapshot operations and clone create
7486 for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
7487 try:
7488 if op == "ls":
7489 self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
7490 elif op == "clone":
7491 self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
7492 else:
7493 self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
7494 except CommandFailedError as ce:
7495 self.assertEqual(ce.exitstatus, errno.ENOENT)
7496 else:
7497 self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))
7498
7499 # try clone status
7500 try:
7501 self._fs_cmd("clone", "status", volname, "clone_1")
7502 except CommandFailedError as ce:
7503 self.assertEqual(ce.exitstatus, errno.ENOENT)
7504 else:
7505 self.fail("expected the 'fs clone status' command to fail")
7506
7507 # try subvolumegroup operations
7508 for op in ("create", "rm", "getpath", "pin", "ls"):
7509 try:
7510 if op == "pin":
7511 self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
7512 elif op == "ls":
7513 self._fs_cmd("subvolumegroup", op, volname)
7514 else:
7515 self._fs_cmd("subvolumegroup", op, volname, "group_1")
7516 except CommandFailedError as ce:
7517 self.assertEqual(ce.exitstatus, errno.ENOENT)
7518 else:
7519 self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))
7520
7521 # try subvolumegroup snapshot operations
7522 for op in ("create", "rm", "ls"):
7523 try:
7524 if op == "ls":
7525 self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
7526 else:
7527 self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
7528 except CommandFailedError as ce:
7529 self.assertEqual(ce.exitstatus, errno.ENOENT)
7530 else:
7531 self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))
7532
7533 def test_subvolume_upgrade_legacy_to_v1(self):
7534 """
7535 poor man's upgrade test -- rather than going through a full upgrade cycle,
7536 emulate subvolumes by going through the wormhole and verify that they are
7537 accessible.
7538 further ensure that a legacy subvolume is not updated to v2.
7539 """
7540 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7541 group = self._generate_random_group_name()
7542
7543 # emulate an old-fashioned subvolume -- one in the default group and
7544 # the other in a custom group
7545 createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
7546 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
7547
7548 # create group
7549 createpath2 = os.path.join(".", "volumes", group, subvolume2)
7550 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)
7551
7552 # this would auto-upgrade on access without anyone noticing
7553 subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
7554 self.assertNotEqual(subvolpath1, None)
7555 subvolpath1 = subvolpath1.rstrip() # remove any trailing newline
7556
7557 subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
7558 self.assertNotEqual(subvolpath2, None)
7559 subvolpath2 = subvolpath2.rstrip() # remove any trailing newline
7560
7561 # and... the subvolume path returned should be what we created behind the scenes
7562 self.assertEqual(createpath1[1:], subvolpath1)
7563 self.assertEqual(createpath2[1:], subvolpath2)
7564
7565 # ensure metadata file is in legacy location, with required version v1
7566 self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
7567 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
7568
7569 # remove subvolume
7570 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7571 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
7572
7573 # verify trash dir is clean
7574 self._wait_for_trash_empty()
7575
7576 # remove group
7577 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7578
7579 def test_subvolume_no_upgrade_v1_sanity(self):
7580 """
7581 poor man's upgrade test -- theme continues...
7582
7583 This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
7584 a series of operations on the v1 subvolume to ensure they work as expected.
7585 """
7586 subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
7587 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
7588 "type", "uid", "features", "state"]
7589 snap_md = ["created_at", "data_pool", "has_pending_clones"]
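# a trimmed sketch of the "subvolume info" JSON whose keys are checked
# against subvol_md below (values here are only illustrative):
#   {"state": "complete", "type": "subvolume", "features": [...], ...}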
7590
7591 subvolume = self._generate_random_subvolume_name()
7592 snapshot = self._generate_random_snapshot_name()
7593 clone1, clone2 = self._generate_random_clone_name(2)
7594 mode = "777"
7595 uid = "1000"
7596 gid = "1000"
7597
7598 # emulate a v1 subvolume -- in the default group
7599 subvolume_path = self._create_v1_subvolume(subvolume)
7600
7601 # getpath
7602 subvolpath = self._get_subvolume_path(self.volname, subvolume)
7603 self.assertEqual(subvolpath, subvolume_path)
7604
7605 # ls
7606 subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
7607 self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
7608 self.assertEqual(subvolumes[0]['name'], subvolume,
7609 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
7610
7611 # info
7612 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
7613 for md in subvol_md:
7614 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
7615
7616 self.assertEqual(subvol_info["state"], "complete",
7617 msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
7618 self.assertEqual(len(subvol_info["features"]), 2,
7619 msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
7620 for feature in ['snapshot-clone', 'snapshot-autoprotect']:
7621 self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
7622
7623 # resize
7624 nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
7625 self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
7626 subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
7627 for md in subvol_md:
7628 self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
7629 self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
7630
7631 # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
7632 self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
7633
7634 # do some IO
7635 self._do_subvolume_io(subvolume, number_of_files=8)
7636
7637 # snap-create
7638 self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
7639
7640 # clone
7641 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
7642
7643 # check clone status
7644 self._wait_for_clone_to_complete(clone1)
7645
7646 # ensure clone is v2
7647 self._assert_meta_location_and_version(self.volname, clone1, version=2)
7648
7649 # verify clone
7650 self._verify_clone(subvolume, snapshot, clone1, source_version=1)
7651
7652 # clone (older snapshot)
7653 self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
7654
7655 # check clone status
7656 self._wait_for_clone_to_complete(clone2)
7657
7658 # ensure clone is v2
7659 self._assert_meta_location_and_version(self.volname, clone2, version=2)
7660
7661 # verify clone
7662 # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
7663 #self._verify_clone(subvolume, 'fake', clone2, source_version=1)
7664
7665 # snap-info
7666 snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
7667 for md in snap_md:
7668 self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
7669 self.assertEqual(snap_info["has_pending_clones"], "no")
7670
7671 # snap-ls
7672 subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
7673 self.assertEqual(len(subvol_snapshots), 2, "subvolume snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
7674 snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
7675 for name in [snapshot, 'fake']:
7676 self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
7677
7678 # snap-rm
7679 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
7680 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
7681
7682 # ensure volume is still at version 1
7683 self._assert_meta_location_and_version(self.volname, subvolume, version=1)
7684
7685 # rm
7686 self._fs_cmd("subvolume", "rm", self.volname, subvolume)
7687 self._fs_cmd("subvolume", "rm", self.volname, clone1)
7688 self._fs_cmd("subvolume", "rm", self.volname, clone2)
7689
7690 # verify trash dir is clean
7691 self._wait_for_trash_empty()
7692
7693 def test_subvolume_no_upgrade_v1_to_v2(self):
7694 """
7695 poor man's upgrade test -- theme continues...
7696 ensure v1 to v2 upgrades are not done automatically, owing to the various states a v1 subvolume can be in
7697 """
7698 subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
7699 group = self._generate_random_group_name()
7700
7701 # emulate a v1 subvolume -- in the default group
7702 subvol1_path = self._create_v1_subvolume(subvolume1)
7703
7704 # emulate a v1 subvolume -- in a custom group
7705 subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)
7706
7707 # emulate a v1 subvolume -- in a clone pending state
7708 self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')
7709
7710 # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
7711 subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
7712 self.assertEqual(subvolpath1, subvol1_path)
7713
7714 subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
7715 self.assertEqual(subvolpath2, subvol2_path)
7716
7717 # this would attempt auto-upgrade on access, but fail to do so as the subvolume is not complete
7718 # use clone status, as only certain operations are allowed in pending state
7719 status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
7720 self.assertEqual(status["status"]["state"], "pending")
7721
7722 # remove snapshot
7723 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
7724 self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)
7725
7726 # ensure metadata file is in v1 location, with version retained as v1
7727 self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
7728 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)
7729
7730 # remove subvolume
7731 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7732 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
7733 try:
7734 self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
7735 except CommandFailedError as ce:
7736 self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
7737 else:
7738 self.fail("expected rm of subvolume undergoing clone to fail")
7739
7740 # ensure metadata file is in v1 location, with version retained as v1
7741 self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
7742 self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")
7743
7744 # verify list subvolumes returns an empty list
7745 subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
7746 self.assertEqual(len(subvolumels), 0)
7747
7748 # verify trash dir is clean
7749 self._wait_for_trash_empty()
7750
7751 def test_subvolume_upgrade_v1_to_v2(self):
7752 """
7753 poor man's upgrade test -- theme continues...
7754 ensure v1 to v2 upgrades work
7755 """
7756 subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
7757 group = self._generate_random_group_name()
7758
7759 # emulate a v1 subvolume -- in the default group
7760 subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)
7761
7762 # emulate a v1 subvolume -- in a custom group
7763 subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)
7764
7765 # this would attempt auto-upgrade on access
7766 subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
7767 self.assertEqual(subvolpath1, subvol1_path)
7768
7769 subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
7770 self.assertEqual(subvolpath2, subvol2_path)
7771
7772 # ensure metadata file is in v2 location, with version retained as v2
7773 self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
7774 self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)
7775
7776 # remove subvolume
7777 self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
7778 self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
7779
7780 # verify trash dir is clean
7781 self._wait_for_trash_empty()
7782
7783 def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
7784 """
7785 Validate handcrafted .meta file on legacy subvol root doesn't break the system
7786 on legacy subvol upgrade to v1
7787 poor man's upgrade test -- theme continues...
7788 """
7789 subvol1, subvol2 = self._generate_random_subvolume_name(2)
7790
7791 # emulate an old-fashioned subvolume in the default group
7792 createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
7793 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
7794
7795 # add required xattrs to subvolume
7796 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7797 self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)
7798
7799 # create v2 subvolume
7800 self._fs_cmd("subvolume", "create", self.volname, subvol2)
7801
7802 # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
7803 # .meta into legacy subvol1's root
7804 subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
7805 self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False)
7806
7807 # Upgrade legacy subvol1 to v1
7808 subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
7809 self.assertNotEqual(subvolpath1, None)
7810 subvolpath1 = subvolpath1.rstrip()
7811
7812 # the subvolume path returned should not be of subvol2 from handcrafted
7813 # .meta file
7814 self.assertEqual(createpath1[1:], subvolpath1)
7815
7816 # ensure metadata file is in legacy location, with required version v1
7817 self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)
7818
7819 # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
7820 # path whose '.meta' file is copied to subvol1 root
7821 authid1 = "alice"
7822 self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)
7823
7824 # Validate that the mds path added is of subvol1 and not of subvol2
7825 out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
7826 self.assertEqual("client.alice", out[0]["entity"])
7827 self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])
7828
7829 # remove subvolume
7830 self._fs_cmd("subvolume", "rm", self.volname, subvol1)
7831 self._fs_cmd("subvolume", "rm", self.volname, subvol2)
7832
7833 # verify trash dir is clean
7834 self._wait_for_trash_empty()
7835
7836 def test_binary_metafile_on_legacy_to_v1_upgrade(self):
7837 """
7838 Validate binary .meta file on legacy subvol root doesn't break the system
7839 on legacy subvol upgrade to v1
7840 poor man's upgrade test -- theme continues...
7841 """
7842 subvol = self._generate_random_subvolume_name()
7843 group = self._generate_random_group_name()
7844
7845 # emulate an old-fashioned subvolume -- in a custom group
7846 createpath = os.path.join(".", "volumes", group, subvol)
7847 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
7848
7849 # add required xattrs to subvolume
7850 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7851 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7852
7853 # Create unparseable binary .meta file on legacy subvol's root
7854 meta_contents = os.urandom(4096)
7855 meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
7856 self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)
7857
7858 # Upgrade legacy subvol to v1
7859 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
7860 self.assertNotEqual(subvolpath, None)
7861 subvolpath = subvolpath.rstrip()
7862
7863 # The legacy subvolume path should be returned for subvol.
7864 # Should ignore unparseable binary .meta file in subvol's root
7865 self.assertEqual(createpath[1:], subvolpath)
7866
7867 # ensure metadata file is in legacy location, with required version v1
7868 self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
7869
7870 # remove subvolume
7871 self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
7872
7873 # verify trash dir is clean
7874 self._wait_for_trash_empty()
7875
7876 # remove group
7877 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7878
7879 def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
7880 """
7881 Validate unparseable text .meta file on legacy subvol root doesn't break the system
7882 on legacy subvol upgrade to v1
7883 poor man's upgrade test -- theme continues...
7884 """
7885 subvol = self._generate_random_subvolume_name()
7886 group = self._generate_random_group_name()
7887
7888 # emulate an old-fashioned subvolume -- in a custom group
7889 createpath = os.path.join(".", "volumes", group, subvol)
7890 self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
7891
7892 # add required xattrs to subvolume
7893 default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
7894 self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
7895
7896 # Create unparseable text .meta file on legacy subvol's root
7897 meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
7898 meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
7899 self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)
7900
7901 # Upgrade legacy subvol to v1
7902 subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
7903 self.assertNotEqual(subvolpath, None)
7904 subvolpath = subvolpath.rstrip()
7905
7906 # The legacy subvolume path should be returned for subvol.
7907 # Should ignore the unparseable text .meta file in subvol's root
7908 self.assertEqual(createpath[1:], subvolpath)
7909
7910 # ensure metadata file is in legacy location, with required version v1
7911 self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)
7912
7913 # remove subvolume
7914 self._fs_cmd("subvolume", "rm", self.volname, subvol, group)
7915
7916 # verify trash dir is clean
7917 self._wait_for_trash_empty()
7918
7919 # remove group
7920 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7921
7922 class TestPerModuleFinsherThread(TestVolumesHelper):
7923 """
7924 Per-module finisher thread tests related to mgr/volumes cmds.
7925 This is used in conjunction with check_counter with min val being 4,
7926 as four subvolume cmds are run.
7927 """
7928 def test_volumes_module_finisher_thread(self):
7929 subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
7930 group = self._generate_random_group_name()
7931
7932 # create group
7933 self._fs_cmd("subvolumegroup", "create", self.volname, group)
7934
7935 # create subvolumes in group
7936 self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
7937 self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group)
7938 self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group)
7939
7940 self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
7941 self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
7942 self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
7943 self._fs_cmd("subvolumegroup", "rm", self.volname, group)
7944
7945 # verify trash dir is clean
7946 self._wait_for_trash_empty()