10 from hashlib
import md5
11 from textwrap
import dedent
12 from io
import StringIO
14 from tasks
.cephfs
.cephfs_test_case
import CephFSTestCase
15 from tasks
.cephfs
.fuse_mount
import FuseMount
16 from teuthology
.exceptions
import CommandFailedError
18 log
= logging
.getLogger(__name__
)
20 class TestVolumesHelper(CephFSTestCase
):
21 """Helper class for testing FS volume, subvolume group and subvolume operations."""
22 TEST_VOLUME_PREFIX
= "volume"
23 TEST_SUBVOLUME_PREFIX
="subvolume"
24 TEST_GROUP_PREFIX
="group"
25 TEST_SNAPSHOT_PREFIX
="snapshot"
26 TEST_CLONE_PREFIX
="clone"
27 TEST_FILE_NAME_PREFIX
="subvolume_file"
29 # for filling subvolume with data
34 DEFAULT_FILE_SIZE
= 1 # MB
35 DEFAULT_NUMBER_OF_FILES
= 1024
37 def _fs_cmd(self
, *args
):
38 return self
.mgr_cluster
.mon_manager
.raw_cluster_cmd("fs", *args
)
40 def _raw_cmd(self
, *args
):
41 return self
.mgr_cluster
.mon_manager
.raw_cluster_cmd(*args
)
43 def __check_clone_state(self
, state
, clone
, clone_group
=None, timo
=120):
45 args
= ["clone", "status", self
.volname
, clone
]
47 args
.append(clone_group
)
50 result
= json
.loads(self
._fs
_cmd
(*args
))
51 if result
["status"]["state"] == state
:
55 self
.assertTrue(check
< timo
)
57 def _get_clone_status(self
, clone
, clone_group
=None):
58 args
= ["clone", "status", self
.volname
, clone
]
60 args
.append(clone_group
)
62 result
= json
.loads(self
._fs
_cmd
(*args
))
65 def _wait_for_clone_to_complete(self
, clone
, clone_group
=None, timo
=120):
66 self
.__check
_clone
_state
("complete", clone
, clone_group
, timo
)
68 def _wait_for_clone_to_fail(self
, clone
, clone_group
=None, timo
=120):
69 self
.__check
_clone
_state
("failed", clone
, clone_group
, timo
)
71 def _wait_for_clone_to_be_in_progress(self
, clone
, clone_group
=None, timo
=120):
72 self
.__check
_clone
_state
("in-progress", clone
, clone_group
, timo
)
74 def _check_clone_canceled(self
, clone
, clone_group
=None):
75 self
.__check
_clone
_state
("canceled", clone
, clone_group
, timo
=1)
77 def _get_subvolume_snapshot_path(self
, subvolume
, snapshot
, source_group
, subvol_path
, source_version
):
78 if source_version
== 2:
80 if subvol_path
is not None:
81 (base_path
, uuid_str
) = os
.path
.split(subvol_path
)
83 (base_path
, uuid_str
) = os
.path
.split(self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
))
84 return os
.path
.join(base_path
, ".snap", snapshot
, uuid_str
)
87 base_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
)
88 return os
.path
.join(base_path
, ".snap", snapshot
)
90 def _verify_clone_attrs(self
, source_path
, clone_path
):
94 p
= self
.mount_a
.run_shell(["find", path1
])
95 paths
= p
.stdout
.getvalue().strip().split()
97 # for each entry in source and clone (sink) verify certain inode attributes:
98 # inode type, mode, ownership, [am]time.
99 for source_path
in paths
:
100 sink_entry
= source_path
[len(path1
)+1:]
101 sink_path
= os
.path
.join(path2
, sink_entry
)
104 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', source_path
]).stdout
.getvalue().strip(), 16)
105 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', sink_path
]).stdout
.getvalue().strip(), 16)
106 self
.assertEqual(sval
, cval
)
109 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', source_path
]).stdout
.getvalue().strip())
110 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', sink_path
]).stdout
.getvalue().strip())
111 self
.assertEqual(sval
, cval
)
113 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', source_path
]).stdout
.getvalue().strip())
114 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', sink_path
]).stdout
.getvalue().strip())
115 self
.assertEqual(sval
, cval
)
118 # do not check access as kclient will generally not update this like ceph-fuse will.
119 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', source_path
]).stdout
.getvalue().strip())
120 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', sink_path
]).stdout
.getvalue().strip())
121 self
.assertEqual(sval
, cval
)
123 def _verify_clone_root(self
, source_path
, clone_path
, clone
, clone_group
, clone_pool
):
124 # verifies following clone root attrs quota, data_pool and pool_namespace
125 # remaining attributes of clone root are validated in _verify_clone_attrs
127 clone_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
, clone_group
))
129 # verify quota is inherited from source snapshot
130 src_quota
= self
.mount_a
.getfattr(source_path
, "ceph.quota.max_bytes")
131 # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
132 if isinstance(self
.mount_a
, FuseMount
):
133 self
.assertEqual(clone_info
["bytes_quota"], "infinite" if src_quota
is None else int(src_quota
))
136 # verify pool is set as per request
137 self
.assertEqual(clone_info
["data_pool"], clone_pool
)
139 # verify pool and pool namespace are inherited from snapshot
140 self
.assertEqual(clone_info
["data_pool"],
141 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool"))
142 self
.assertEqual(clone_info
["pool_namespace"],
143 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool_namespace"))
145 def _verify_clone(self
, subvolume
, snapshot
, clone
,
146 source_group
=None, clone_group
=None, clone_pool
=None,
147 subvol_path
=None, source_version
=2, timo
=120):
148 # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
149 # but snapshots are retained for clone verification
150 path1
= self
._get
_subvolume
_snapshot
_path
(subvolume
, snapshot
, source_group
, subvol_path
, source_version
)
151 path2
= self
._get
_subvolume
_path
(self
.volname
, clone
, group_name
=clone_group
)
154 # TODO: currently snapshot rentries are not stable if snapshot source entries
155 # are removed, https://tracker.ceph.com/issues/46747
156 while check
< timo
and subvol_path
is None:
157 val1
= int(self
.mount_a
.getfattr(path1
, "ceph.dir.rentries"))
158 val2
= int(self
.mount_a
.getfattr(path2
, "ceph.dir.rentries"))
163 self
.assertTrue(check
< timo
)
165 self
._verify
_clone
_root
(path1
, path2
, clone
, clone_group
, clone_pool
)
166 self
._verify
_clone
_attrs
(path1
, path2
)
168 def _generate_random_volume_name(self
, count
=1):
169 n
= self
.volume_start
170 volumes
= [f
"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
171 self
.volume_start
+= count
172 return volumes
[0] if count
== 1 else volumes
174 def _generate_random_subvolume_name(self
, count
=1):
175 n
= self
.subvolume_start
176 subvolumes
= [f
"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
177 self
.subvolume_start
+= count
178 return subvolumes
[0] if count
== 1 else subvolumes
180 def _generate_random_group_name(self
, count
=1):
182 groups
= [f
"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
183 self
.group_start
+= count
184 return groups
[0] if count
== 1 else groups
186 def _generate_random_snapshot_name(self
, count
=1):
187 n
= self
.snapshot_start
188 snaps
= [f
"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
189 self
.snapshot_start
+= count
190 return snaps
[0] if count
== 1 else snaps
192 def _generate_random_clone_name(self
, count
=1):
194 clones
= [f
"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
195 self
.clone_start
+= count
196 return clones
[0] if count
== 1 else clones
198 def _enable_multi_fs(self
):
199 self
._fs
_cmd
("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")
201 def _create_or_reuse_test_volume(self
):
202 result
= json
.loads(self
._fs
_cmd
("volume", "ls"))
204 self
.vol_created
= True
205 self
.volname
= self
._generate
_random
_volume
_name
()
206 self
._fs
_cmd
("volume", "create", self
.volname
)
208 self
.volname
= result
[0]['name']
210 def _get_volume_info(self
, vol_name
, human_readable
=False):
212 args
= ["volume", "info", vol_name
, human_readable
]
214 args
= ["volume", "info", vol_name
]
216 vol_md
= self
._fs
_cmd
(*args
)
219 def _get_subvolume_group_path(self
, vol_name
, group_name
):
220 args
= ("subvolumegroup", "getpath", vol_name
, group_name
)
221 path
= self
._fs
_cmd
(*args
)
222 # remove the leading '/', and trailing whitespaces
223 return path
[1:].rstrip()
225 def _get_subvolume_group_info(self
, vol_name
, group_name
):
226 args
= ["subvolumegroup", "info", vol_name
, group_name
]
228 group_md
= self
._fs
_cmd
(*args
)
231 def _get_subvolume_path(self
, vol_name
, subvol_name
, group_name
=None):
232 args
= ["subvolume", "getpath", vol_name
, subvol_name
]
234 args
.append(group_name
)
236 path
= self
._fs
_cmd
(*args
)
237 # remove the leading '/', and trailing whitespaces
238 return path
[1:].rstrip()
240 def _get_subvolume_info(self
, vol_name
, subvol_name
, group_name
=None):
241 args
= ["subvolume", "info", vol_name
, subvol_name
]
243 args
.append(group_name
)
245 subvol_md
= self
._fs
_cmd
(*args
)
248 def _get_subvolume_snapshot_info(self
, vol_name
, subvol_name
, snapname
, group_name
=None):
249 args
= ["subvolume", "snapshot", "info", vol_name
, subvol_name
, snapname
]
251 args
.append(group_name
)
253 snap_md
= self
._fs
_cmd
(*args
)
256 def _delete_test_volume(self
):
257 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
259 def _do_subvolume_pool_and_namespace_update(self
, subvolume
, pool
=None, pool_namespace
=None, subvolume_group
=None):
260 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
263 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool', pool
, sudo
=True)
265 if pool_namespace
is not None:
266 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool_namespace', pool_namespace
, sudo
=True)
268 def _do_subvolume_attr_update(self
, subvolume
, uid
, gid
, mode
, subvolume_group
=None):
269 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
272 self
.mount_a
.run_shell(['sudo', 'chmod', mode
, subvolpath
], omit_sudo
=False)
275 self
.mount_a
.run_shell(['sudo', 'chown', uid
, subvolpath
], omit_sudo
=False)
276 self
.mount_a
.run_shell(['sudo', 'chgrp', gid
, subvolpath
], omit_sudo
=False)
278 def _do_subvolume_io(self
, subvolume
, subvolume_group
=None, create_dir
=None,
279 number_of_files
=DEFAULT_NUMBER_OF_FILES
, file_size
=DEFAULT_FILE_SIZE
):
280 # get subvolume path for IO
281 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
283 args
.append(subvolume_group
)
285 subvolpath
= self
._fs
_cmd
(*args
)
286 self
.assertNotEqual(subvolpath
, None)
287 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
291 io_path
= os
.path
.join(subvolpath
, create_dir
)
292 self
.mount_a
.run_shell_payload(f
"mkdir -p {io_path}")
294 log
.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume
, number_of_files
, file_size
, io_path
))
295 for i
in range(number_of_files
):
296 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
297 self
.mount_a
.write_n_mb(os
.path
.join(io_path
, filename
), file_size
)
299 def _do_subvolume_io_mixed(self
, subvolume
, subvolume_group
=None):
300 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
302 reg_file
= "regfile.0"
303 dir_path
= os
.path
.join(subvolpath
, "dir.0")
304 sym_path1
= os
.path
.join(subvolpath
, "sym.0")
305 # this symlink's ownership would be changed
306 sym_path2
= os
.path
.join(dir_path
, "sym.0")
308 self
.mount_a
.run_shell(["mkdir", dir_path
])
309 self
.mount_a
.run_shell(["ln", "-s", "./{}".format(reg_file
), sym_path1
])
310 self
.mount_a
.run_shell(["ln", "-s", "./{}".format(reg_file
), sym_path2
])
311 # flip ownership to nobody. assumption: nobody's id is 65534
312 self
.mount_a
.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2
], omit_sudo
=False)
314 def _wait_for_trash_empty(self
, timeout
=60):
315 # XXX: construct the trash dir path (note that there is no mgr
316 # [sub]volume interface for this).
317 trashdir
= os
.path
.join("./", "volumes", "_deleting")
318 self
.mount_a
.wait_for_dir_empty(trashdir
, timeout
=timeout
)
320 def _wait_for_subvol_trash_empty(self
, subvol
, group
="_nogroup", timeout
=30):
321 trashdir
= os
.path
.join("./", "volumes", group
, subvol
, ".trash")
323 self
.mount_a
.wait_for_dir_empty(trashdir
, timeout
=timeout
)
324 except CommandFailedError
as ce
:
325 if ce
.exitstatus
!= errno
.ENOENT
:
330 def _assert_meta_location_and_version(self
, vol_name
, subvol_name
, subvol_group
=None, version
=2, legacy
=False):
332 subvol_path
= self
._get
_subvolume
_path
(vol_name
, subvol_name
, group_name
=subvol_group
)
334 m
.update(("/"+subvol_path
).encode('utf-8'))
335 meta_filename
= "{0}.meta".format(m
.digest().hex())
336 metapath
= os
.path
.join(".", "volumes", "_legacy", meta_filename
)
338 group
= subvol_group
if subvol_group
is not None else '_nogroup'
339 metapath
= os
.path
.join(".", "volumes", group
, subvol_name
, ".meta")
341 out
= self
.mount_a
.run_shell(['sudo', 'cat', metapath
], omit_sudo
=False)
342 lines
= out
.stdout
.getvalue().strip().split('\n')
345 if line
== "version = " + str(version
):
348 self
.assertEqual(sv_version
, version
, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
349 version
, sv_version
, metapath
))
351 def _create_v1_subvolume(self
, subvol_name
, subvol_group
=None, has_snapshot
=True, subvol_type
='subvolume', state
='complete'):
352 group
= subvol_group
if subvol_group
is not None else '_nogroup'
353 basepath
= os
.path
.join("volumes", group
, subvol_name
)
354 uuid_str
= str(uuid
.uuid4())
355 createpath
= os
.path
.join(basepath
, uuid_str
)
356 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', createpath
], omit_sudo
=False)
358 # create a v1 snapshot, to prevent auto upgrades
360 snappath
= os
.path
.join(createpath
, ".snap", "fake")
361 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', snappath
], omit_sudo
=False)
363 # add required xattrs to subvolume
364 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
365 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
367 # create a v1 .meta file
368 meta_contents
= "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type
, "/" + createpath
, state
)
369 if state
== 'pending':
370 # add a fake clone source
371 meta_contents
= meta_contents
+ '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
372 meta_filepath1
= os
.path
.join(self
.mount_a
.mountpoint
, basepath
, ".meta")
373 self
.mount_a
.client_remote
.write_file(meta_filepath1
, meta_contents
, sudo
=True)
376 def _update_fake_trash(self
, subvol_name
, subvol_group
=None, trash_name
='fake', create
=True):
377 group
= subvol_group
if subvol_group
is not None else '_nogroup'
378 trashpath
= os
.path
.join("volumes", group
, subvol_name
, '.trash', trash_name
)
380 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', trashpath
], omit_sudo
=False)
382 self
.mount_a
.run_shell(['sudo', 'rmdir', trashpath
], omit_sudo
=False)
384 def _configure_guest_auth(self
, guest_mount
, authid
, key
):
386 Set up auth credentials for a guest client.
388 # Create keyring file for the guest client.
389 keyring_txt
= dedent("""
393 """.format(authid
=authid
,key
=key
))
395 guest_mount
.client_id
= authid
396 guest_mount
.client_remote
.write_file(guest_mount
.get_keyring_path(),
397 keyring_txt
, sudo
=True)
398 # Add a guest client section to the ceph config file.
399 self
.config_set("client.{0}".format(authid
), "debug client", 20)
400 self
.config_set("client.{0}".format(authid
), "debug objecter", 20)
401 self
.set_conf("client.{0}".format(authid
),
402 "keyring", guest_mount
.get_keyring_path())
404 def _auth_metadata_get(self
, filedata
):
406 Return a deserialized JSON object, or None
409 data
= json
.loads(filedata
)
410 except json
.decoder
.JSONDecodeError
:
415 super(TestVolumesHelper
, self
).setUp()
417 self
.vol_created
= False
418 self
._enable
_multi
_fs
()
419 self
._create
_or
_reuse
_test
_volume
()
420 self
.config_set('mon', 'mon_allow_pool_delete', True)
421 self
.volume_start
= random
.randint(1, (1<<20))
422 self
.subvolume_start
= random
.randint(1, (1<<20))
423 self
.group_start
= random
.randint(1, (1<<20))
424 self
.snapshot_start
= random
.randint(1, (1<<20))
425 self
.clone_start
= random
.randint(1, (1<<20))
429 self
._delete
_test
_volume
()
430 super(TestVolumesHelper
, self
).tearDown()
433 class TestVolumes(TestVolumesHelper
):
434 """Tests for FS volume operations."""
435 def test_volume_create(self
):
437 That the volume can be created and then cleans up
439 volname
= self
._generate
_random
_volume
_name
()
440 self
._fs
_cmd
("volume", "create", volname
)
441 volumels
= json
.loads(self
._fs
_cmd
("volume", "ls"))
443 if not (volname
in ([volume
['name'] for volume
in volumels
])):
444 raise RuntimeError("Error creating volume '{0}'".format(volname
))
447 self
._fs
_cmd
("volume", "rm", volname
, "--yes-i-really-mean-it")
449 def test_volume_ls(self
):
451 That the existing and the newly created volumes can be listed and
454 vls
= json
.loads(self
._fs
_cmd
("volume", "ls"))
455 volumes
= [volume
['name'] for volume
in vls
]
457 #create new volumes and add it to the existing list of volumes
458 volumenames
= self
._generate
_random
_volume
_name
(2)
459 for volumename
in volumenames
:
460 self
._fs
_cmd
("volume", "create", volumename
)
461 volumes
.extend(volumenames
)
465 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
466 if len(volumels
) == 0:
467 raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
469 volnames
= [volume
['name'] for volume
in volumels
]
470 if collections
.Counter(volnames
) != collections
.Counter(volumes
):
471 raise RuntimeError("Error creating or listing volumes")
474 for volume
in volumenames
:
475 self
._fs
_cmd
("volume", "rm", volume
, "--yes-i-really-mean-it")
477 def test_volume_rm(self
):
479 That the volume can only be removed when --yes-i-really-mean-it is used
480 and verify that the deleted volume is not listed anymore.
482 for m
in self
.mounts
:
485 self
._fs
_cmd
("volume", "rm", self
.volname
)
486 except CommandFailedError
as ce
:
487 if ce
.exitstatus
!= errno
.EPERM
:
488 raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
489 "but it failed with {0}".format(ce
.exitstatus
))
491 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
494 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
495 if (self
.volname
in [volume
['name'] for volume
in volumes
]):
496 raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
497 "The volume {0} not removed.".format(self
.volname
))
499 raise RuntimeError("expected the 'fs volume rm' command to fail.")
501 def test_volume_rm_arbitrary_pool_removal(self
):
503 That the arbitrary pool added to the volume out of band is removed
504 successfully on volume removal.
506 for m
in self
.mounts
:
508 new_pool
= "new_pool"
509 # add arbitrary data pool
510 self
.fs
.add_data_pool(new_pool
)
511 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
512 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
515 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
516 volnames
= [volume
['name'] for volume
in volumes
]
517 self
.assertNotIn(self
.volname
, volnames
)
519 #check if osd pools are gone
520 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
521 for pool
in vol_status
["pools"]:
522 self
.assertNotIn(pool
["name"], pools
)
524 def test_volume_rm_when_mon_delete_pool_false(self
):
526 That the volume can only be removed when mon_allowd_pool_delete is set
527 to true and verify that the pools are removed after volume deletion.
529 for m
in self
.mounts
:
531 self
.config_set('mon', 'mon_allow_pool_delete', False)
533 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
534 except CommandFailedError
as ce
:
535 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
536 "expected the 'fs volume rm' command to fail with EPERM, "
537 "but it failed with {0}".format(ce
.exitstatus
))
538 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
539 self
.config_set('mon', 'mon_allow_pool_delete', True)
540 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
543 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
544 volnames
= [volume
['name'] for volume
in volumes
]
545 self
.assertNotIn(self
.volname
, volnames
,
546 "volume {0} exists after removal".format(self
.volname
))
547 #check if pools are gone
548 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
549 for pool
in vol_status
["pools"]:
550 self
.assertNotIn(pool
["name"], pools
,
551 "pool {0} exists after volume removal".format(pool
["name"]))
553 def test_volume_rename(self
):
555 That volume, its file system and pools, can be renamed.
557 for m
in self
.mounts
:
559 oldvolname
= self
.volname
560 newvolname
= self
._generate
_random
_volume
_name
()
561 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
562 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
563 "--yes-i-really-mean-it")
564 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
565 volnames
= [volume
['name'] for volume
in volumels
]
566 # volume name changed
567 self
.assertIn(newvolname
, volnames
)
568 self
.assertNotIn(oldvolname
, volnames
)
570 self
.fs
.get_pool_names(refresh
=True)
571 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
572 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
574 def test_volume_rename_idempotency(self
):
576 That volume rename is idempotent.
578 for m
in self
.mounts
:
580 oldvolname
= self
.volname
581 newvolname
= self
._generate
_random
_volume
_name
()
582 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
583 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
584 "--yes-i-really-mean-it")
585 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
586 "--yes-i-really-mean-it")
587 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
588 volnames
= [volume
['name'] for volume
in volumels
]
589 self
.assertIn(newvolname
, volnames
)
590 self
.assertNotIn(oldvolname
, volnames
)
591 self
.fs
.get_pool_names(refresh
=True)
592 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
593 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
595 def test_volume_rename_fails_without_confirmation_flag(self
):
597 That renaming volume fails without --yes-i-really-mean-it flag.
599 newvolname
= self
._generate
_random
_volume
_name
()
601 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
)
602 except CommandFailedError
as ce
:
603 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
604 "invalid error code on renaming a FS volume without the "
605 "'--yes-i-really-mean-it' flag")
607 self
.fail("expected renaming of FS volume to fail without the "
608 "'--yes-i-really-mean-it' flag")
610 def test_volume_rename_for_more_than_one_data_pool(self
):
612 That renaming a volume with more than one data pool does not change
613 the name of the data pools.
615 for m
in self
.mounts
:
617 self
.fs
.add_data_pool('another-data-pool')
618 oldvolname
= self
.volname
619 newvolname
= self
._generate
_random
_volume
_name
()
620 self
.fs
.get_pool_names(refresh
=True)
621 orig_data_pool_names
= list(self
.fs
.data_pools
.values())
622 new_metadata_pool
= f
"cephfs.{newvolname}.meta"
623 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
,
624 "--yes-i-really-mean-it")
625 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
626 volnames
= [volume
['name'] for volume
in volumels
]
627 # volume name changed
628 self
.assertIn(newvolname
, volnames
)
629 self
.assertNotIn(oldvolname
, volnames
)
630 self
.fs
.get_pool_names(refresh
=True)
631 # metadata pool name changed
632 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
633 # data pool names unchanged
634 self
.assertCountEqual(orig_data_pool_names
, list(self
.fs
.data_pools
.values()))
636 def test_volume_info(self
):
638 Tests the 'fs volume info' command
640 vol_fields
= ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
641 group
= self
._generate
_random
_group
_name
()
642 # create subvolumegroup
643 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
644 # get volume metadata
645 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
646 for md
in vol_fields
:
647 self
.assertIn(md
, vol_info
,
648 f
"'{md}' key not present in metadata of volume")
649 self
.assertEqual(vol_info
["used_size"], 0,
650 "Size should be zero when volumes directory is empty")
652 def test_volume_info_without_subvolumegroup(self
):
654 Tests the 'fs volume info' command without subvolume group
656 vol_fields
= ["pools", "mon_addrs"]
657 # get volume metadata
658 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
659 for md
in vol_fields
:
660 self
.assertIn(md
, vol_info
,
661 f
"'{md}' key not present in metadata of volume")
662 self
.assertNotIn("used_size", vol_info
,
663 "'used_size' should not be present in absence of subvolumegroup")
664 self
.assertNotIn("pending_subvolume_deletions", vol_info
,
665 "'pending_subvolume_deletions' should not be present in absence"
666 " of subvolumegroup")
668 def test_volume_info_with_human_readable_flag(self
):
670 Tests the 'fs volume info --human_readable' command
672 vol_fields
= ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
673 group
= self
._generate
_random
_group
_name
()
674 # create subvolumegroup
675 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
676 # get volume metadata
677 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
, "--human_readable"))
678 for md
in vol_fields
:
679 self
.assertIn(md
, vol_info
,
680 f
"'{md}' key not present in metadata of volume")
681 units
= [' ', 'k', 'M', 'G', 'T', 'P', 'E']
682 assert vol_info
["used_size"][-1] in units
, "unit suffix in used_size is absent"
683 assert vol_info
["pools"]["data"][0]["avail"][-1] in units
, "unit suffix in avail data is absent"
684 assert vol_info
["pools"]["data"][0]["used"][-1] in units
, "unit suffix in used data is absent"
685 assert vol_info
["pools"]["metadata"][0]["avail"][-1] in units
, "unit suffix in avail metadata is absent"
686 assert vol_info
["pools"]["metadata"][0]["used"][-1] in units
, "unit suffix in used metadata is absent"
687 self
.assertEqual(int(vol_info
["used_size"]), 0,
688 "Size should be zero when volumes directory is empty")
690 def test_volume_info_with_human_readable_flag_without_subvolumegroup(self
):
692 Tests the 'fs volume info --human_readable' command without subvolume group
694 vol_fields
= ["pools", "mon_addrs"]
695 # get volume metadata
696 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
, "--human_readable"))
697 for md
in vol_fields
:
698 self
.assertIn(md
, vol_info
,
699 f
"'{md}' key not present in metadata of volume")
700 units
= [' ', 'k', 'M', 'G', 'T', 'P', 'E']
701 assert vol_info
["pools"]["data"][0]["avail"][-1] in units
, "unit suffix in avail data is absent"
702 assert vol_info
["pools"]["data"][0]["used"][-1] in units
, "unit suffix in used data is absent"
703 assert vol_info
["pools"]["metadata"][0]["avail"][-1] in units
, "unit suffix in avail metadata is absent"
704 assert vol_info
["pools"]["metadata"][0]["used"][-1] in units
, "unit suffix in used metadata is absent"
705 self
.assertNotIn("used_size", vol_info
,
706 "'used_size' should not be present in absence of subvolumegroup")
707 self
.assertNotIn("pending_subvolume_deletions", vol_info
,
708 "'pending_subvolume_deletions' should not be present in absence"
709 " of subvolumegroup")
712 class TestSubvolumeGroups(TestVolumesHelper
):
713 """Tests for FS subvolume group operations."""
714 def test_default_uid_gid_subvolume_group(self
):
715 group
= self
._generate
_random
_group
_name
()
720 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
721 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
723 # check group's uid and gid
724 stat
= self
.mount_a
.stat(group_path
)
725 self
.assertEqual(stat
['st_uid'], expected_uid
)
726 self
.assertEqual(stat
['st_gid'], expected_gid
)
729 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
731 def test_nonexistent_subvolume_group_create(self
):
732 subvolume
= self
._generate
_random
_subvolume
_name
()
733 group
= "non_existent_group"
735 # try, creating subvolume in a nonexistent group
737 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
738 except CommandFailedError
as ce
:
739 if ce
.exitstatus
!= errno
.ENOENT
:
742 raise RuntimeError("expected the 'fs subvolume create' command to fail")
744 def test_nonexistent_subvolume_group_rm(self
):
745 group
= "non_existent_group"
747 # try, remove subvolume group
749 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
750 except CommandFailedError
as ce
:
751 if ce
.exitstatus
!= errno
.ENOENT
:
754 raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
756 def test_subvolume_group_create_with_auto_cleanup_on_fail(self
):
757 group
= self
._generate
_random
_group
_name
()
758 data_pool
= "invalid_pool"
759 # create group with invalid data pool layout
760 with self
.assertRaises(CommandFailedError
):
761 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", data_pool
)
763 # check whether group path is cleaned up
765 self
._fs
_cmd
("subvolumegroup", "getpath", self
.volname
, group
)
766 except CommandFailedError
as ce
:
767 if ce
.exitstatus
!= errno
.ENOENT
:
770 raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
772 def test_subvolume_group_create_with_desired_data_pool_layout(self
):
773 group1
, group2
= self
._generate
_random
_group
_name
(2)
776 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group1
)
777 group1_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group1
)
779 default_pool
= self
.mount_a
.getfattr(group1_path
, "ceph.dir.layout.pool")
780 new_pool
= "new_pool"
781 self
.assertNotEqual(default_pool
, new_pool
)
784 newid
= self
.fs
.add_data_pool(new_pool
)
786 # create group specifying the new data pool as its pool layout
787 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group2
,
788 "--pool_layout", new_pool
)
789 group2_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group2
)
791 desired_pool
= self
.mount_a
.getfattr(group2_path
, "ceph.dir.layout.pool")
793 self
.assertEqual(desired_pool
, new_pool
)
794 except AssertionError:
795 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
797 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group1
)
798 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group2
)
def test_subvolume_group_create_with_desired_mode(self):
    """
    That --mode at group creation sets the directory mode, that a group
    created without --mode gets the default 755, and that the parent
    'volumes' directory keeps the default mode.
    """
    grp_default, grp_custom = self._generate_random_group_name(2)
    # mode expected when no --mode is passed
    expected_mode1 = "755"
    # explicitly requested mode
    expected_mode2 = "777"

    # create the custom-mode group first, then the default-mode one
    self._fs_cmd("subvolumegroup", "create", self.volname, grp_custom, f"--mode={expected_mode2}")
    self._fs_cmd("subvolumegroup", "create", self.volname, grp_default)

    path_default = self._get_subvolume_group_path(self.volname, grp_default)
    path_custom = self._get_subvolume_group_path(self.volname, grp_custom)
    volumes_dir = os.path.dirname(path_default)

    def observed_mode(path):
        # octal permission bits as printed by stat(1)
        return self.mount_a.run_shell(['stat', '-c' '%a', path]).stdout.getvalue().strip()

    self.assertEqual(observed_mode(path_default), expected_mode1)
    self.assertEqual(observed_mode(path_custom), expected_mode2)
    # creating a group must not alter the mode of the parent volumes dir
    self.assertEqual(observed_mode(volumes_dir), expected_mode1)

    self._fs_cmd("subvolumegroup", "rm", self.volname, grp_default)
    self._fs_cmd("subvolumegroup", "rm", self.volname, grp_custom)
def test_subvolume_group_create_with_desired_uid_gid(self):
    """
    That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
    expected values.
    """
    # NOTE(review): the uid/gid assignments were lost in extraction;
    # 1000/1000 matches the upstream test -- TODO confirm
    uid = 1000
    gid = 1000

    # create subvolume group
    subvolgroupname = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid))

    # make sure it exists
    subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname)
    self.assertNotEqual(subvolgrouppath, None)

    # verify the uid and gid
    suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip())
    sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip())
    self.assertEqual(uid, suid)
    self.assertEqual(gid, sgid)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname)
def test_subvolume_group_create_with_invalid_data_pool_layout(self):
    """
    That creating a group with a nonexistent --pool_layout fails with EINVAL.
    """
    group = self._generate_random_group_name()
    data_pool = "invalid_pool"
    # create group with invalid data pool layout
    try:
        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        # EINVAL is the expected failure; anything else is a real error
        if ce.exitstatus != errno.EINVAL:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
def test_subvolume_group_create_with_size(self):
    """Passing a size at group creation must install a byte quota."""
    quota_bytes = 1000000000
    group = self._generate_random_group_name()
    # create group with size -- should set quota
    self._fs_cmd("subvolumegroup", "create", self.volname, group, str(quota_bytes))

    # verify the quota through 'subvolumegroup info'
    info = json.loads(self._get_subvolume_group_info(self.volname, group))
    self.assertEqual(info["bytes_quota"], quota_bytes)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_info(self):
    # tests the 'fs subvolumegroup info' command

    # metadata keys the info output must always contain
    group_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]

    # create group
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # get group metadata (no quota set yet)
    group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
    for md in group_md:
        self.assertIn(md, group_info, "'{0}' key not present in metadata of group".format(md))

    self.assertEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
    self.assertEqual(group_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
    self.assertEqual(group_info["uid"], 0)
    self.assertEqual(group_info["gid"], 0)

    # resize to set a quota, then re-check
    nsize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))

    # get group metadata after quota set
    group_info = json.loads(self._get_subvolume_group_info(self.volname, group))
    for md in group_md:
        self.assertIn(md, group_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertNotEqual(group_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
    self.assertEqual(group_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence(self):
    """Creating a subvolume group that already exists must succeed."""
    # create subvolume group
    group_name = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group_name)

    # try creating w/ same subvolume group name -- should be idempotent
    self._fs_cmd("subvolumegroup", "create", self.volname, group_name)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group_name)
def test_subvolume_group_create_idempotence_mode(self):
    """Re-creating an existing group with --mode must update the mode."""
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume group name with mode -- should set mode
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=766")

    path = self._get_subvolume_group_path(self.volname, group)

    # check subvolumegroup's mode
    observed = self.mount_a.run_shell(['stat', '-c' '%a', path]).stdout.getvalue().strip()
    self.assertEqual(observed, "766")

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_uid_gid(self):
    """
    That re-creating an existing group with --uid/--gid updates the group's
    ownership to the requested values.
    """
    # NOTE(review): the desired uid/gid assignments were lost in extraction;
    # 1000/1000 matches the upstream test -- TODO confirm
    desired_uid = 1000
    desired_gid = 1000

    # create subvolume group
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume group name with uid/gid -- should set uid/gid
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--uid", str(desired_uid), "--gid", str(desired_gid))

    group_path = self._get_subvolume_group_path(self.volname, group)

    # verify the uid and gid
    actual_uid = int(self.mount_a.run_shell(['stat', '-c' '%u', group_path]).stdout.getvalue().strip())
    actual_gid = int(self.mount_a.run_shell(['stat', '-c' '%g', group_path]).stdout.getvalue().strip())
    self.assertEqual(desired_uid, actual_uid)
    self.assertEqual(desired_gid, actual_gid)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_data_pool(self):
    """
    That re-creating an existing group with --pool_layout moves it to the
    new data pool (old kernels report the pool id instead of its name).
    """
    # create subvolume group
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    group_path = self._get_subvolume_group_path(self.volname, group)

    default_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add the new data pool to the file system
    newid = self.fs.add_data_pool(new_pool)

    # try creating w/ same subvolume group name with new data pool -- should set pool
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", new_pool)
    desired_pool = self.mount_a.getfattr(group_path, "ceph.dir.layout.pool")
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_create_idempotence_resize(self):
    """Re-creating an existing group with a size must apply the quota."""
    # create subvolume group
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # try creating w/ same subvolume name with size -- should set quota
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # get group metadata and check the quota took effect
    info = json.loads(self._get_subvolume_group_info(self.volname, group))
    self.assertEqual(info["bytes_quota"], 1000000000)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_quota_mds_path_restriction_to_group_path(self):
    """
    Tests subvolumegroup quota enforcement with mds path restriction set to group.
    For quota to be enforced, read permission needs to be provided to the parent
    of the directory on which quota is set. Please see the tracker comment [1]
    [1] https://tracker.ceph.com/issues/55090#note-8
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create an auth id with the mds cap restricted to /volumes
    # NOTE(review): the mgr/mon cap lines were lost in extraction and are
    # reconstructed from the upstream test -- TODO confirm
    authid = "client.guest1"
    user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
        "auth", "get-or-create", authid,
        "mds", "allow rw path=/volumes",
        "mgr", "allow rw",
        "osd", "allow rw tag cephfs *=*",
        "mon", "allow r",
        "--format=json-pretty"
        ))

    # Prepare guest_mount with new authid
    guest_mount = self.mount_b
    guest_mount.umount_wait()

    # configure credentials for guest client
    self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

    # mount the subvolume
    mount_path = os.path.join("/", subvolpath)
    guest_mount.mount_wait(cephfs_mntpt=mount_path)

    # create 99 files of 1MB
    guest_mount.run_shell_payload("mkdir -p dir1")
    for i in range(99):
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
        guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)

    try:
        # write two files of 1MB file to exceed the quota
        guest_mount.run_shell_payload("mkdir -p dir2")
        for i in range(2):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
        # For quota to be enforced
        time.sleep(60)
        # create 400 files of 1MB to exceed quota
        for i in range(400):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # Sometimes quota enforcement takes time.
            if i == 200:
                time.sleep(60)
    except CommandFailedError:
        # expected: writes beyond the quota must fail
        pass
    else:
        self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

    # clean up
    guest_mount.umount_wait()

    # Delete the subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self):
    """
    Tests subvolumegroup quota enforcement with mds path restriction set to subvolume path
    The quota should not be enforced because of the fourth limitation mentioned at
    https://docs.ceph.com/en/latest/cephfs/quota/#limitations
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    mount_path = os.path.join("/", subvolpath)

    # create an auth id with the mds cap restricted to the subvolume path
    # NOTE(review): the mgr/mon cap lines were lost in extraction and are
    # reconstructed from the upstream test -- TODO confirm
    authid = "client.guest1"
    user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
        "auth", "get-or-create", authid,
        "mds", f"allow rw path={mount_path}",
        "mgr", "allow rw",
        "osd", "allow rw tag cephfs *=*",
        "mon", "allow r",
        "--format=json-pretty"
        ))

    # Prepare guest_mount with new authid
    guest_mount = self.mount_b
    guest_mount.umount_wait()

    # configure credentials for guest client
    self._configure_guest_auth(guest_mount, "guest1", user[0]["key"])

    # mount the subvolume
    guest_mount.mount_wait(cephfs_mntpt=mount_path)

    # create 99 files of 1MB to exceed quota
    guest_mount.run_shell_payload("mkdir -p dir1")
    for i in range(99):
        filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
        guest_mount.write_n_mb(os.path.join("dir1", filename), self.DEFAULT_FILE_SIZE)

    try:
        # write two files of 1MB file to exceed the quota
        guest_mount.run_shell_payload("mkdir -p dir2")
        for i in range(2):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
        # For quota to be enforced
        time.sleep(60)
        # create 400 files of 1MB to exceed quota
        for i in range(400):
            filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i)
            guest_mount.write_n_mb(os.path.join("dir2", filename), self.DEFAULT_FILE_SIZE)
            # Sometimes quota enforcement takes time.
            if i == 200:
                time.sleep(60)
    except CommandFailedError:
        self.fail(f"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed")

    # clean up
    guest_mount.umount_wait()

    # Delete the subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_exceeded_subvolume_removal(self):
    """
    Tests subvolume removal if it's group quota is exceeded
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 99 files of 1MB to exceed quota
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        time.sleep(60)
        # create 400 files of 1MB to exceed quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=400)
    except CommandFailedError:
        # Delete subvolume when group quota is exceeded
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    else:
        self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self):
    """
    Tests retained snapshot subvolume removal if it's group quota is exceeded
    """
    group = self._generate_random_group_name()
    subvolname = self._generate_random_subvolume_name()
    snapshot1, snapshot2 = self._generate_random_snapshot_name(2)

    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 99 files of 1MB to exceed quota
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

    # snapshot subvolume
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot1, "--group_name", group)
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolname, snapshot2, "--group_name", group)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        time.sleep(60)
        # create 400 files of 1MB to exceed quota
        self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=400)
    except CommandFailedError:
        # remove with snapshot retention
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group, "--retain-snapshots")
        # remove snapshot1
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot1, "--group_name", group)
        # remove snapshot2 (should remove volume)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolname, snapshot2, "--group_name", group)
        # verify subvolume trash is clean
        self._wait_for_subvol_trash_empty(subvolname, group=group)
    else:
        self.fail(f"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_subvolume_removal(self):
    """
    Tests subvolume removal if it's group quota is set.
    """
    # create group with size -- should set quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", group)

    # remove subvolume
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

    # remove subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_legacy_subvolume_removal(self):
    """
    Tests legacy subvolume removal if it's group quota is set.
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume -- in a custom group
    createpath1 = os.path.join(".", "volumes", group, subvolume)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

    # this would auto-upgrade on access without anyone noticing
    subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
    self.assertNotEqual(subvolpath1, None)
    subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

    # and... the subvolume path returned should be what we created behind the scene
    self.assertEqual(createpath1[1:], subvolpath1)

    # Set subvolumegroup quota on idempotent subvolumegroup creation
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # remove subvolume
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

    # remove subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_quota_v1_subvolume_removal(self):
    """
    Tests v1 subvolume removal if it's group quota is set.
    """
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a v1 subvolume -- in a custom group
    self._create_v1_subvolume(subvolume, subvol_group=group, has_snapshot=False)

    # Set subvolumegroup quota on idempotent subvolumegroup creation
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "1000000000")

    # remove subvolume
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
    except CommandFailedError:
        self.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")

    # remove subvolumegroup
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_fail_invalid_size(self):
    """
    That a subvolume group cannot be resized to an invalid size and the quota did not change
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    # create group with 1MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # try to resize the subvolume with an invalid size -10
    nsize = -10
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL,
                         "invalid error code on resize of subvolume group with invalid size")
    else:
        self.fail("expected the 'fs subvolumegroup resize' command to fail")

    # verify the quota did not change
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, osize)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_resize_fail_zero_size(self):
    """
    That a subvolume group cannot be resized to a zero size and the quota did not change
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    # create group with 1MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group, "--size", str(osize))

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # try to resize the subvolume group with size 0
    nsize = 0
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL,
                         "invalid error code on resize of subvolume group with invalid size")
    else:
        self.fail("expected the 'fs subvolumegroup resize' command to fail")

    # verify the quota did not change
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, osize)

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_resize_quota_lt_used_size(self):
    """
    That a subvolume group can be resized to a size smaller than the current used size
    and the resulting quota matches the expected size.
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*20
    # create group with 20MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create one file of 10MB
    file_size=self.DEFAULT_FILE_SIZE*10
    number_of_files=1
    log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                         number_of_files,
                                                                         file_size))
    filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1)
    self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

    usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes"))

    # shrink the subvolume group
    nsize = usedsize // 2
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
    except CommandFailedError:
        self.fail("expected the 'fs subvolumegroup resize' command to succeed")

    # verify the quota matches the new size
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, nsize)

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self):
    """
    That a subvolume group cannot be resized to a size smaller than the current used size
    when --no_shrink is given and the quota did not change.
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*20
    # create group with 20MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create one file of 10MB
    file_size=self.DEFAULT_FILE_SIZE*10
    number_of_files=1
    log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname,
                                                                         number_of_files,
                                                                         file_size))
    filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2)
    self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size)

    usedsize = int(self.mount_a.getfattr(grouppath, "ceph.dir.rbytes"))

    # shrink the subvolume group
    nsize = usedsize // 2
    try:
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize), "--no_shrink")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolumegroup with quota less than used")
    else:
        self.fail("expected the 'fs subvolumegroup resize' command to fail")

    # verify the quota did not change
    size = int(self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes"))
    self.assertEqual(size, osize)

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_expand_on_full_subvolume(self):
    """
    That the subvolume group can be expanded after it is full and future write succeed
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*100
    # create group with 100MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 99 files of 1MB
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=99)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        # NOTE(review): restored elided wait for quota enforcement -- confirm duration against upstream
        time.sleep(20)
        # create 500 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
    except CommandFailedError:
        # Not able to write. So expand the subvolumegroup more and try writing the files again
        # NOTE(review): restored elided expanded size -- confirm factor against upstream
        nsize = osize*7
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, str(nsize))
        try:
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to succeed".format(subvolname))
    else:
        self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                  "to fail".format(subvolname))

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_resize_infinite_size(self):
    """
    That a subvolume group can be resized to an infinite size by unsetting its quota.
    """
    # create a group with a finite quota first
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize))

    # the group must exist before we can resize it
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # resizing to "inf" unsets the quota entirely
    self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")

    # with the quota unset, the xattr is absent (reads back as None)
    size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
    self.assertEqual(size, None)

    # clean up the group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_group_resize_infinite_size_future_writes(self):
    """
    That a subvolume group can be resized to an infinite size and the future writes succeed.
    """
    osize = self.DEFAULT_FILE_SIZE*1024*1024*5
    # create group with 5MB quota
    group = self._generate_random_group_name()
    self._fs_cmd("subvolumegroup", "create", self.volname, group,
                 "--size", str(osize), "--mode=777")

    # make sure it exists
    grouppath = self._get_subvolume_group_path(self.volname, group)
    self.assertNotEqual(grouppath, None)

    # create subvolume under the group
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname,
                 "--group_name", group, "--mode=777")

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname, group_name=group)
    self.assertNotEqual(subvolpath, None)

    # create 4 files of 1MB
    self._do_subvolume_io(subvolname, subvolume_group=group, number_of_files=4)

    try:
        # write two files of 1MB file to exceed the quota
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=2)
        # For quota to be enforced
        # NOTE(review): restored elided wait for quota enforcement -- confirm duration against upstream
        time.sleep(20)
        # create 500 files of 1MB
        self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
    except CommandFailedError:
        # Not able to write. So resize subvolumegroup to 'inf' and try writing the files again
        self._fs_cmd("subvolumegroup", "resize", self.volname, group, "inf")
        try:
            self._do_subvolume_io(subvolname, subvolume_group=group, create_dir='dir1', number_of_files=500)
        except CommandFailedError:
            self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                      "to succeed".format(subvolname))
    else:
        self.fail("expected filling subvolume {0} with 500 files of size 1MB "
                  "to fail".format(subvolname))

    # verify that the quota is None
    size = self.mount_a.getfattr(grouppath, "ceph.quota.max_bytes")
    self.assertEqual(size, None)

    # remove subvolume and group
    self._fs_cmd("subvolume", "rm", self.volname, subvolname, "--group_name", group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_ls(self):
    # tests the 'fs subvolumegroup ls' command

    # create a few groups to list
    subvolumegroups = self._generate_random_group_name(3)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if len(subvolumegroupls) == 0:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")

    # compare as multisets so ordering of the listing does not matter
    subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
    if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups):
        raise RuntimeError("Error creating or listing subvolume groups")
def test_subvolume_group_ls_filter(self):
    # tests the 'fs subvolumegroup ls' command filters '_deleting' directory

    # create a few groups
    subvolumegroups = self._generate_random_group_name(3)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # create subvolume and remove. This creates '_deleting' directory.
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # the internal '_deleting' directory must not show up in the listing
    listing = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    names = [entry['name'] for entry in listing]
    if "_deleting" in names:
        self.fail("Listing subvolume groups listed '_deleting' directory")
def test_subvolume_group_ls_filter_internal_directories(self):
    # tests the 'fs subvolumegroup ls' command filters internal directories
    # eg: '_deleting', '_nogroup', '_index', "_legacy"

    subvolumegroups = self._generate_random_group_name(3)
    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()
    clone = self._generate_random_clone_name()

    # create the groups the listing is expected to contain
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # create subvolume which will create '_nogroup' directory
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # snapshot the subvolume so it can be cloned
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # clone snapshot which will create '_index' directory
    self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

    # wait for clone to complete
    self._wait_for_clone_to_complete(clone)

    # snapshot no longer needed once the clone finished
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

    # remove subvolume which will create '_deleting' directory
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # list subvolumegroups: only the explicitly created groups may appear
    ret = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    self.assertEqual(len(ret), len(subvolumegroups))

    ret_list = [entry['name'] for entry in ret]
    self.assertEqual(len(ret_list), len(subvolumegroups))

    self.assertEqual(all(elem in subvolumegroups for elem in ret_list), True)

    # cleanup: remove the clone and the groups
    self._fs_cmd("subvolume", "rm", self.volname, clone)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "rm", self.volname, groupname)
def test_subvolume_group_ls_for_nonexistent_volume(self):
    # tests the 'fs subvolumegroup ls' command when /volume doesn't exist
    # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created

    # listing a volume with no groups must yield an empty list
    listing = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if len(listing) > 0:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
def test_subvolumegroup_pin_distributed(self):
    """Test ephemeral distributed pinning of a subvolume group across MDS ranks."""
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()
    self.config_set('mds', 'mds_export_ephemeral_distributed', True)

    # NOTE(review): restored elided group name -- confirm against upstream
    group = "pinme"
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    self._fs_cmd("subvolumegroup", "pin", self.volname, group, "distributed", "True")
    subvolumes = self._generate_random_subvolume_name(50)
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
    self._wait_distributed_subtrees(2 * 2, status=status, rank="all")

    # remove subvolumes
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_group_rm_force(self):
    # test removing non-existing subvolume group with --force
    group = self._generate_random_group_name()
    # removing a non-existent group with --force must be a silent no-op
    try:
        self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
    except CommandFailedError:
        raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self):
    """Test the presence of any subvolumegroup when only subvolumegroup is present"""
    group = self._generate_random_group_name()

    # with one group created, 'exist' must report a group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")

    # after deleting it, 'exist' must report none
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self):
    """Test the presence of any subvolumegroup when no subvolumegroup is present"""
    # a fresh volume has no groups, so 'exist' must report none
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
    """Test the presence of any subvolume when subvolumegroup
    and subvolume both are present"""
    group = self._generate_random_group_name()
    subvolume = self._generate_random_subvolume_name(2)

    # create a group plus one subvolume inside it and one outside it
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
    self._fs_cmd("subvolume", "create", self.volname, subvolume[1])
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")

    # deleting the grouped subvolume leaves the group itself in place
    self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")

    # deleting the ungrouped subvolume also leaves the group in place
    self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "subvolumegroup exists")

    # only removing the group itself flips the result
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self):
    """Test the presence of any subvolume when subvolume is present
    but no subvolumegroup is present"""
    subvolume = self._generate_random_subvolume_name()

    # an ungrouped subvolume must not count as a subvolumegroup
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")

    # same result after the subvolume is gone
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    ret = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(ret.strip('\n'), "no subvolumegroup exists")
1843 class TestSubvolumes(TestVolumesHelper
):
1844 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
def test_async_subvolume_rm(self):
    """Remove many populated subvolumes and verify async trash purge drains."""
    subvolumes = self._generate_random_subvolume_name(100)

    # create the subvolumes and put some data in each
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
        self._do_subvolume_io(subvolume, number_of_files=10)

    self.mount_a.umount_wait()

    # remove all of them while the client mount is down
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    self.mount_a.mount_wait()

    # purging 100 subvolumes takes a while -- use an extended timeout
    self._wait_for_trash_empty(timeout=300)
def test_default_uid_gid_subvolume(self):
    """That a subvolume created without --uid/--gid is owned by root (0:0)."""
    subvolume = self._generate_random_subvolume_name()
    # NOTE(review): restored elided expected ownership (root) -- confirm against upstream
    expected_uid = 0
    expected_gid = 0

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    # check subvolume's uid and gid
    stat = self.mount_a.stat(subvol_path)
    self.assertEqual(stat['st_uid'], expected_uid)
    self.assertEqual(stat['st_gid'], expected_gid)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_nonexistent_subvolume_rm(self):
    # remove non-existing subvolume
    subvolume = "non_existent_subvolume"

    # try, remove subvolume
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    except CommandFailedError as ce:
        # only ENOENT is the expected failure; anything else is a real error
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume rm' command to fail")
def test_subvolume_create_and_rm(self):
    """Create a subvolume, remove it, and verify it is actually gone."""
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # make sure it exists
    subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    self.assertNotEqual(subvolpath, None)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    # make sure its gone
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        # only ENOENT confirms removal; re-raise anything else
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_rm_in_group(self):
    """Create and remove a subvolume that lives inside a subvolume group."""
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # the group must exist before a subvolume can be placed in it
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # remove it (group name given positionally here)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # finally drop the now-empty group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_create_idempotence(self):
    """Creating an already-existing subvolume must succeed quietly."""
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # try creating w/ same subvolume name -- should be idempotent
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_resize(self):
    """Re-creating an existing subvolume with a size acts as a resize."""
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # try creating w/ same subvolume name with size -- should set quota
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

    # the metadata must reflect the newly applied quota
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertEqual(subvol_info["bytes_quota"], 1000000000)

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_mode(self):
    """Re-creating an existing subvolume with --mode updates its mode."""
    # default mode
    default_mode = "755"

    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, default_mode)

    # try creating w/ same subvolume name with --mode 777
    new_mode = "777"
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, new_mode)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_without_passing_mode(self):
    """Re-creating an existing subvolume without --mode resets it to the default."""
    desired_mode = "777"
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, desired_mode)

    # the default applied when no --mode is given
    default_mode = "755"

    # try creating w/ same subvolume name without passing --mode argument
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, default_mode)

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_isolated_namespace(self):
    """
    Create subvolume in separate rados namespace
    """
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

    # the reported pool namespace follows the 'fsvolumens_<name>' convention
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertNotEqual(len(subvol_info), 0)
    self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_auto_cleanup_on_fail(self):
    """That a failed subvolume create leaves no partial subvolume behind."""
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"
    # create subvolume with invalid data pool layout fails
    with self.assertRaises(CommandFailedError):
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

    # check whether subvol path is cleaned up
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
    else:
        self.fail("expected the 'fs subvolume getpath' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
    """That --pool_layout places a grouped subvolume's data in the requested pool."""
    subvol1, subvol2 = self._generate_random_subvolume_name(2)
    group = self._generate_random_group_name()

    # create group. this also helps set default pool layout for subvolumes
    # created within the group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

    default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add the new data pool to the fs
    newid = self.fs.add_data_pool(new_pool)

    # create subvolume specifying the new data pool as its pool layout
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                 "--pool_layout", new_pool)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

    desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode(self):
    """That --mode applies to the subvolume only, not its parent directories."""
    subvol1 = self._generate_random_subvolume_name()

    default_mode = "755"
    desired_mode = "777"

    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1)

    # the enclosing group directory keeps the default mode
    subvol_par_path = os.path.dirname(subvol1_path)
    group_path = os.path.dirname(subvol_par_path)
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, default_mode)
    # so does the top-level /volumes directory
    volumes_path = os.path.dirname(group_path)
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode2, default_mode)
    # only the subvolume itself carries the requested mode
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode3, desired_mode)

    self._fs_cmd("subvolume", "rm", self.volname, subvol1)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode_in_group(self):
    """That grouped subvolumes honor --mode in both '777' and '0777' spellings."""
    subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)

    group = self._generate_random_group_name()
    expected_mode1 = "755"
    expected_mode2 = "777"

    # create the enclosing group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # one subvolume with the default mode, one with 777 ...
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
    # ... and one checking that the octal-prefixed spelling 0777 also works
    self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
    subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

    # verify each subvolume's mode
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, expected_mode1)
    self.assertEqual(actual_mode2, expected_mode2)
    self.assertEqual(actual_mode3, expected_mode2)

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_uid_gid(self):
    """
    That the subvolume can be created with the desired uid and gid and its uid and gid matches the
    expected values.
    """
    # NOTE(review): restored elided uid/gid values -- confirm against upstream
    uid = 1000
    gid = 1000

    # create subvolume
    subvolname = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid))

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname)
    self.assertNotEqual(subvolpath, None)

    # verify the uid and gid
    suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip())
    sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip())
    self.assertEqual(uid, suid)
    self.assertEqual(gid, sgid)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_data_pool_layout(self):
    """That subvolume create rejects an unknown data pool with EINVAL."""
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"
    # create subvolume with invalid data pool layout
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_size(self):
    """That subvolume create rejects a negative size with EINVAL."""
    # create subvolume with an invalid size -1
    subvolume = self._generate_random_subvolume_name()
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
    """
    That a 'subvolume create' and 'subvolume ls' should throw
    permission denied error if option --group=_nogroup is provided.
    """
    subvolname = self._generate_random_subvolume_name()

    # try to create subvolume providing --group_name=_nogroup option
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM)
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # create subvolume (no explicit group)
    self._fs_cmd("subvolume", "create", self.volname, subvolname)

    # try to list subvolumes providing --group_name=_nogroup option
    try:
        self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM)
    else:
        self.fail("expected the 'fs subvolume ls' command to fail")

    # listing without the internal group name works
    self._fs_cmd("subvolume", "ls", self.volname)

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
def test_subvolume_expand(self):
    """
    That a subvolume can be expanded in size and its quota matches the expected size.
    """
    # create subvolume
    subvolname = self._generate_random_subvolume_name()
    osize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))

    # make sure it exists
    subvolpath = self._get_subvolume_path(self.volname, subvolname)
    self.assertNotEqual(subvolpath, None)

    # expand the subvolume
    # NOTE(review): restored elided new size (double the original) -- confirm against upstream
    nsize = osize*2
    self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))

    # verify the quota matches the expanded size
    size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes"))
    self.assertEqual(size, nsize)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_info(self):
    # tests the 'fs subvolume info' command

    subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                 "type", "uid", "features", "state"]

    # create a subvolume without any quota
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # every expected metadata key must be present
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    # without a quota the percentage/quota fields take sentinel values
    self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
    self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # now set a quota and re-check the metadata
    nsize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
    self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # cleanup
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
2334 def test_subvolume_ls(self
):
2335 # tests the 'fs subvolume ls' command
2340 subvolumes
= self
._generate
_random
_subvolume
_name
(3)
2341 for subvolume
in subvolumes
:
2342 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2345 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
2346 if len(subvolumels
) == 0:
2347 self
.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
2349 subvolnames
= [subvolume
['name'] for subvolume
in subvolumels
]
2350 if collections
.Counter(subvolnames
) != collections
.Counter(subvolumes
):
2351 self
.fail("Error creating or listing subvolumes")
2354 for subvolume
in subvolumes
:
2355 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2357 # verify trash dir is clean
2358 self
._wait
_for
_trash
_empty
()
2360 def test_subvolume_ls_with_groupname_as_internal_directory(self
):
2361 # tests the 'fs subvolume ls' command when the default groupname as internal directories
2362 # Eg: '_nogroup', '_legacy', '_deleting', '_index'.
2363 # Expecting 'fs subvolume ls' will be fail with errno EINVAL for '_legacy', '_deleting', '_index'
2364 # Expecting 'fs subvolume ls' will be fail with errno EPERM for '_nogroup'
2366 # try to list subvolumes providing --group_name=_nogroup option
2368 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_nogroup")
2369 except CommandFailedError
as ce
:
2370 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
)
2372 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EPERM' for _nogroup")
2374 # try to list subvolumes providing --group_name=_legacy option
2376 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_legacy")
2377 except CommandFailedError
as ce
:
2378 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
)
2380 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _legacy")
2382 # try to list subvolumes providing --group_name=_deleting option
2384 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_deleting")
2385 except CommandFailedError
as ce
:
2386 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
)
2388 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _deleting")
2390 # try to list subvolumes providing --group_name=_index option
2392 self
._fs
_cmd
("subvolume", "ls", self
.volname
, "--group_name", "_index")
2393 except CommandFailedError
as ce
:
2394 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
)
2396 self
.fail("expected the 'fs subvolume ls' command to fail with error 'EINVAL' for _index")
2398 def test_subvolume_ls_for_notexistent_default_group(self
):
2399 # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
2400 # prerequisite: we expect that the volume is created and the default group _nogroup is
2401 # NOT created (i.e. a subvolume without group is not created)
2404 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
2405 if len(subvolumels
) > 0:
2406 raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
2408 def test_subvolume_marked(self
):
2410 ensure a subvolume is marked with the ceph.dir.subvolume xattr
2412 subvolume
= self
._generate
_random
_subvolume
_name
()
2415 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2418 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
2420 # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
2421 # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
2422 # outside the subvolume
2423 dstpath
= os
.path
.join(self
.mount_a
.mountpoint
, 'volumes', '_nogroup', 'new_subvol_location')
2424 srcpath
= os
.path
.join(self
.mount_a
.mountpoint
, subvolpath
)
2425 rename_script
= dedent("""
2429 os.rename("{src}", "{dst}")
2430 except OSError as e:
2431 if e.errno != errno.EXDEV:
2432 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
2434 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
2436 self
.mount_a
.run_python(rename_script
.format(src
=srcpath
, dst
=dstpath
), sudo
=True)
2439 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2441 # verify trash dir is clean
2442 self
._wait
_for
_trash
_empty
()
2444 def test_subvolume_pin_export(self
):
2445 self
.fs
.set_max_mds(2)
2446 status
= self
.fs
.wait_for_daemons()
2448 subvolume
= self
._generate
_random
_subvolume
_name
()
2449 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2450 self
._fs
_cmd
("subvolume", "pin", self
.volname
, subvolume
, "export", "1")
2451 path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
2452 path
= os
.path
.dirname(path
) # get subvolume path
2454 self
._get
_subtrees
(status
=status
, rank
=1)
2455 self
._wait
_subtrees
([(path
, 1)], status
=status
)
2458 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2460 # verify trash dir is clean
2461 self
._wait
_for
_trash
_empty
()
2463 ### authorize operations
2465 def test_authorize_deauthorize_legacy_subvolume(self
):
2466 subvolume
= self
._generate
_random
_subvolume
_name
()
2467 group
= self
._generate
_random
_group
_name
()
2470 guest_mount
= self
.mount_b
2471 guest_mount
.umount_wait()
2473 # emulate a old-fashioned subvolume in a custom group
2474 createpath
= os
.path
.join(".", "volumes", group
, subvolume
)
2475 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', createpath
], omit_sudo
=False)
2477 # add required xattrs to subvolume
2478 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
2479 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
2481 mount_path
= os
.path
.join("/", "volumes", group
, subvolume
)
2483 # authorize guest authID read-write access to subvolume
2484 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2485 "--group_name", group
, "--tenant_id", "tenant_id")
2487 # guest authID should exist
2488 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2489 self
.assertIn("client.{0}".format(authid
), existing_ids
)
2491 # configure credentials for guest client
2492 self
._configure
_guest
_auth
(guest_mount
, authid
, key
)
2494 # mount the subvolume, and write to it
2495 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2496 guest_mount
.write_n_mb("data.bin", 1)
2498 # authorize guest authID read access to subvolume
2499 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2500 "--group_name", group
, "--tenant_id", "tenant_id", "--access_level", "r")
2502 # guest client sees the change in access level to read only after a
2503 # remount of the subvolume.
2504 guest_mount
.umount_wait()
2505 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2507 # read existing content of the subvolume
2508 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
2509 # cannot write into read-only subvolume
2510 with self
.assertRaises(CommandFailedError
):
2511 guest_mount
.write_n_mb("rogue.bin", 1)
2514 guest_mount
.umount_wait()
2515 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid
,
2516 "--group_name", group
)
2517 # guest authID should no longer exist
2518 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2519 self
.assertNotIn("client.{0}".format(authid
), existing_ids
)
2520 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2521 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2523 def test_authorize_deauthorize_subvolume(self
):
2524 subvolume
= self
._generate
_random
_subvolume
_name
()
2525 group
= self
._generate
_random
_group
_name
()
2528 guest_mount
= self
.mount_b
2529 guest_mount
.umount_wait()
2532 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--mode=777")
2534 # create subvolume in group
2535 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2536 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
2537 "--group_name", group
).rstrip()
2539 # authorize guest authID read-write access to subvolume
2540 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2541 "--group_name", group
, "--tenant_id", "tenant_id")
2543 # guest authID should exist
2544 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2545 self
.assertIn("client.{0}".format(authid
), existing_ids
)
2547 # configure credentials for guest client
2548 self
._configure
_guest
_auth
(guest_mount
, authid
, key
)
2550 # mount the subvolume, and write to it
2551 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2552 guest_mount
.write_n_mb("data.bin", 1)
2554 # authorize guest authID read access to subvolume
2555 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2556 "--group_name", group
, "--tenant_id", "tenant_id", "--access_level", "r")
2558 # guest client sees the change in access level to read only after a
2559 # remount of the subvolume.
2560 guest_mount
.umount_wait()
2561 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2563 # read existing content of the subvolume
2564 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
2565 # cannot write into read-only subvolume
2566 with self
.assertRaises(CommandFailedError
):
2567 guest_mount
.write_n_mb("rogue.bin", 1)
2570 guest_mount
.umount_wait()
2571 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid
,
2572 "--group_name", group
)
2573 # guest authID should no longer exist
2574 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2575 self
.assertNotIn("client.{0}".format(authid
), existing_ids
)
2576 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2577 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2579 def test_multitenant_subvolumes(self
):
2581 That subvolume access can be restricted to a tenant.
2583 That metadata used to enforce tenant isolation of
2584 subvolumes is stored as a two-way mapping between auth
2585 IDs and subvolumes that they're authorized to access.
2587 subvolume
= self
._generate
_random
_subvolume
_name
()
2588 group
= self
._generate
_random
_group
_name
()
2590 guest_mount
= self
.mount_b
2592 # Guest clients belonging to different tenants, but using the same
2597 "tenant_id": "tenant1",
2601 "tenant_id": "tenant2",
2605 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2607 # create subvolume in group
2608 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2610 # Check that subvolume metadata file is created on subvolume creation.
2611 subvol_metadata_filename
= "_{0}:{1}.meta".format(group
, subvolume
)
2612 self
.assertIn(subvol_metadata_filename
, guest_mount
.ls("volumes"))
2614 # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
2615 # 'tenant1', with 'rw' access to the volume.
2616 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2617 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2619 # Check that auth metadata file for auth ID 'alice', is
2620 # created on authorizing 'alice' access to the subvolume.
2621 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2622 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2624 # Verify that the auth metadata file stores the tenant ID that the
2625 # auth ID belongs to, the auth ID's authorized access levels
2626 # for different subvolumes, versioning details, etc.
2627 expected_auth_metadata
= {
2629 "compat_version": 6,
2631 "tenant_id": "tenant1",
2633 "{0}/{1}".format(group
,subvolume
): {
2635 "access_level": "rw"
2640 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2641 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
2642 del expected_auth_metadata
["version"]
2643 del auth_metadata
["version"]
2644 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
2646 # Verify that the subvolume metadata file stores info about auth IDs
2647 # and their access levels to the subvolume, versioning details, etc.
2648 expected_subvol_metadata
= {
2650 "compat_version": 1,
2654 "access_level": "rw"
2658 subvol_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(subvol_metadata_filename
)))
2660 self
.assertGreaterEqual(subvol_metadata
["version"], expected_subvol_metadata
["version"])
2661 del expected_subvol_metadata
["version"]
2662 del subvol_metadata
["version"]
2663 self
.assertEqual(expected_subvol_metadata
, subvol_metadata
)
2665 # Cannot authorize 'guestclient_2' to access the volume.
2666 # It uses auth ID 'alice', which has already been used by a
2667 # 'guestclient_1' belonging to an another tenant for accessing
2671 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_2
["auth_id"],
2672 "--group_name", group
, "--tenant_id", guestclient_2
["tenant_id"])
2673 except CommandFailedError
as ce
:
2674 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
2675 "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
2677 self
.fail("expected the 'fs subvolume authorize' command to fail")
2679 # Check that auth metadata file is cleaned up on removing
2680 # auth ID's only access to a volume.
2682 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
2683 "--group_name", group
)
2684 self
.assertNotIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2686 # Check that subvolume metadata file is cleaned up on subvolume deletion.
2687 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2688 self
.assertNotIn(subvol_metadata_filename
, guest_mount
.ls("volumes"))
2691 guest_mount
.umount_wait()
2692 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2694 def test_subvolume_authorized_list(self
):
2695 subvolume
= self
._generate
_random
_subvolume
_name
()
2696 group
= self
._generate
_random
_group
_name
()
2702 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2704 # create subvolume in group
2705 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2707 # authorize alice authID read-write access to subvolume
2708 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid1
,
2709 "--group_name", group
)
2710 # authorize guest1 authID read-write access to subvolume
2711 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid2
,
2712 "--group_name", group
)
2713 # authorize guest2 authID read access to subvolume
2714 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid3
,
2715 "--group_name", group
, "--access_level", "r")
2717 # list authorized-ids of the subvolume
2718 expected_auth_list
= [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
2719 auth_list
= json
.loads(self
._fs
_cmd
('subvolume', 'authorized_list', self
.volname
, subvolume
, "--group_name", group
))
2720 self
.assertCountEqual(expected_auth_list
, auth_list
)
2723 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid1
,
2724 "--group_name", group
)
2725 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid2
,
2726 "--group_name", group
)
2727 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid3
,
2728 "--group_name", group
)
2729 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2730 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2732 def test_authorize_auth_id_not_created_by_mgr_volumes(self
):
2734 If the auth_id already exists and is not created by mgr plugin,
2735 it's not allowed to authorize the auth-id by default.
2738 subvolume
= self
._generate
_random
_subvolume
_name
()
2739 group
= self
._generate
_random
_group
_name
()
2742 self
.fs
.mon_manager
.raw_cluster_cmd(
2743 "auth", "get-or-create", "client.guest1",
2752 "tenant_id": "tenant1",
2756 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2758 # create subvolume in group
2759 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2762 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2763 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2764 except CommandFailedError
as ce
:
2765 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
2766 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
2768 self
.fail("expected the 'fs subvolume authorize' command to fail")
2771 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2772 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2773 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2775 def test_authorize_allow_existing_id_option(self
):
2777 If the auth_id already exists and is not created by mgr volumes,
2778 it's not allowed to authorize the auth-id by default but is
2779 allowed with option allow_existing_id.
2782 subvolume
= self
._generate
_random
_subvolume
_name
()
2783 group
= self
._generate
_random
_group
_name
()
2786 self
.fs
.mon_manager
.raw_cluster_cmd(
2787 "auth", "get-or-create", "client.guest1",
2796 "tenant_id": "tenant1",
2800 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2802 # create subvolume in group
2803 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2805 # Cannot authorize 'guestclient_1' to access the volume by default,
2806 # which already exists and not created by mgr volumes but is allowed
2807 # with option 'allow_existing_id'.
2808 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2809 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"], "--allow-existing-id")
2812 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
2813 "--group_name", group
)
2814 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2815 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2816 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2818 def test_deauthorize_auth_id_after_out_of_band_update(self
):
2820 If the auth_id authorized by mgr/volumes plugin is updated
2821 out of band, the auth_id should not be deleted after a
2822 deauthorize. It should only remove caps associated with it.
2825 subvolume
= self
._generate
_random
_subvolume
_name
()
2826 group
= self
._generate
_random
_group
_name
()
2831 "tenant_id": "tenant1",
2835 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2837 # create subvolume in group
2838 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2840 # Authorize 'guestclient_1' to access the subvolume.
2841 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2842 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2844 subvol_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
2845 "--group_name", group
).rstrip()
2847 # Update caps for guestclient_1 out of band
2848 out
= self
.fs
.mon_manager
.raw_cluster_cmd(
2849 "auth", "caps", "client.guest1",
2850 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group
, subvol_path
),
2851 "osd", "allow rw pool=cephfs_data",
2856 # Deauthorize guestclient_1
2857 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
2859 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
2860 # guestclient_1. The mgr and mds caps should be present which was updated out of band.
2861 out
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
2863 self
.assertEqual("client.guest1", out
[0]["entity"])
2864 self
.assertEqual("allow rw path=/volumes/{0}".format(group
), out
[0]["caps"]["mds"])
2865 self
.assertEqual("allow *", out
[0]["caps"]["mgr"])
2866 self
.assertNotIn("osd", out
[0]["caps"])
2869 out
= self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2870 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2871 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2873 def test_recover_auth_metadata_during_authorize(self
):
2875 That auth metadata manager can recover from partial auth updates using
2876 metadata files, which store auth info and its update status info. This
2877 test validates the recovery during authorize.
2880 guest_mount
= self
.mount_b
2882 subvolume
= self
._generate
_random
_subvolume
_name
()
2883 group
= self
._generate
_random
_group
_name
()
2888 "tenant_id": "tenant1",
2892 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2894 # create subvolume in group
2895 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2897 # Authorize 'guestclient_1' to access the subvolume.
2898 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2899 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2901 # Check that auth metadata file for auth ID 'guest1', is
2902 # created on authorizing 'guest1' access to the subvolume.
2903 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2904 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2905 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2907 # Induce partial auth update state by modifying the auth metadata file,
2908 # and then run authorize again.
2909 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
2911 # Authorize 'guestclient_1' to access the subvolume.
2912 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2913 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2915 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2916 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
2919 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
2920 guest_mount
.umount_wait()
2921 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2922 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2923 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2925 def test_recover_auth_metadata_during_deauthorize(self
):
2927 That auth metadata manager can recover from partial auth updates using
2928 metadata files, which store auth info and its update status info. This
2929 test validates the recovery during deauthorize.
2932 guest_mount
= self
.mount_b
2934 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
2935 group
= self
._generate
_random
_group
_name
()
2938 "auth_id": "guest1",
2939 "tenant_id": "tenant1",
2943 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2945 # create subvolumes in group
2946 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
2947 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
2949 # Authorize 'guestclient_1' to access the subvolume1.
2950 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
2951 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2953 # Check that auth metadata file for auth ID 'guest1', is
2954 # created on authorizing 'guest1' access to the subvolume1.
2955 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2956 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2957 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2959 # Authorize 'guestclient_1' to access the subvolume2.
2960 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2961 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2963 # Induce partial auth update state by modifying the auth metadata file,
2964 # and then run de-authorize.
2965 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
2967 # Deauthorize 'guestclient_1' to access the subvolume2.
2968 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2969 "--group_name", group
)
2971 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2972 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
2975 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, "guest1", "--group_name", group
)
2976 guest_mount
.umount_wait()
2977 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2978 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
2979 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
2980 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2982 def test_update_old_style_auth_metadata_to_new_during_authorize(self
):
2984 CephVolumeClient stores the subvolume data in auth metadata file with
2985 'volumes' key as there was no subvolume namespace. It doesn't makes sense
2986 with mgr/volumes. This test validates the transparent update of 'volumes'
2987 key to 'subvolumes' key in auth metadata file during authorize.
2990 guest_mount
= self
.mount_b
2992 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
2993 group
= self
._generate
_random
_group
_name
()
2998 "tenant_id": "tenant1",
3002 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3004 # create subvolumes in group
3005 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3006 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
3008 # Authorize 'guestclient_1' to access the subvolume1.
3009 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
3010 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3012 # Check that auth metadata file for auth ID 'guest1', is
3013 # created on authorizing 'guest1' access to the subvolume1.
3014 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
3015 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
3017 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
3018 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
3020 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
3021 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
3022 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3024 expected_auth_metadata
= {
3026 "compat_version": 6,
3028 "tenant_id": "tenant1",
3030 "{0}/{1}".format(group
,subvolume1
): {
3032 "access_level": "rw"
3034 "{0}/{1}".format(group
,subvolume2
): {
3036 "access_level": "rw"
3041 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
3043 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
3044 del expected_auth_metadata
["version"]
3045 del auth_metadata
["version"]
3046 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
3049 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
3050 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
3051 guest_mount
.umount_wait()
3052 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
3053 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3054 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3055 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3057 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self
):
3059 CephVolumeClient stores the subvolume data in auth metadata file with
3060 'volumes' key as there was no subvolume namespace. It doesn't makes sense
3061 with mgr/volumes. This test validates the transparent update of 'volumes'
3062 key to 'subvolumes' key in auth metadata file during deauthorize.
3065 guest_mount
= self
.mount_b
3067 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
3068 group
= self
._generate
_random
_group
_name
()
3073 "tenant_id": "tenant1",
3077 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3079 # create subvolumes in group
3080 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3081 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
3083 # Authorize 'guestclient_1' to access the subvolume1.
3084 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
3085 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3087 # Authorize 'guestclient_1' to access the subvolume2.
3088 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
3089 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3091 # Check that auth metadata file for auth ID 'guest1', is created.
3092 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
3093 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
3095 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
3096 guest_mount
.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], omit_sudo
=False)
3098 # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
3099 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
3101 expected_auth_metadata
= {
3103 "compat_version": 6,
3105 "tenant_id": "tenant1",
3107 "{0}/{1}".format(group
,subvolume1
): {
3109 "access_level": "rw"
3114 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
3116 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
3117 del expected_auth_metadata
["version"]
3118 del auth_metadata
["version"]
3119 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
3122 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
3123 guest_mount
.umount_wait()
3124 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
3125 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3126 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3127 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3129 def test_subvolume_evict_client(self
):
3131 That a subvolume client can be evicted based on the auth ID
3134 subvolumes
= self
._generate
_random
_subvolume
_name
(2)
3135 group
= self
._generate
_random
_group
_name
()
3138 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3140 # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
3141 for i
in range(0, 2):
3142 self
.mounts
[i
].umount_wait()
3143 guest_mounts
= (self
.mounts
[0], self
.mounts
[1])
3147 "tenant_id": "tenant1",
3150 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
3151 # subvolumes. Mount the two subvolumes. Write data to the volumes.
3154 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolumes
[i
], "--group_name", group
, "--mode=777")
3156 # authorize guest authID read-write access to subvolume
3157 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolumes
[i
], guestclient_1
["auth_id"],
3158 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3160 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolumes
[i
],
3161 "--group_name", group
).rstrip()
3162 # configure credentials for guest client
3163 self
._configure
_guest
_auth
(guest_mounts
[i
], auth_id
, key
)
3165 # mount the subvolume, and write to it
3166 guest_mounts
[i
].mount_wait(cephfs_mntpt
=mount_path
)
3167 guest_mounts
[i
].write_n_mb("data.bin", 1)
3169 # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
3171 self
._fs
_cmd
("subvolume", "evict", self
.volname
, subvolumes
[0], auth_id
, "--group_name", group
)
3173 # Evicted guest client, guest_mounts[0], should not be able to do
3174 # anymore metadata ops. It should start failing all operations
3175 # when it sees that its own address is in the blocklist.
3177 guest_mounts
[0].write_n_mb("rogue.bin", 1)
3178 except CommandFailedError
:
3181 raise RuntimeError("post-eviction write should have failed!")
3183 # The blocklisted guest client should now be unmountable
3184 guest_mounts
[0].umount_wait()
3186 # Guest client, guest_mounts[1], using the same auth ID 'guest', but
3187 # has mounted the other volume, should be able to use its volume
3189 guest_mounts
[1].write_n_mb("data.bin.1", 1)
3192 guest_mounts
[1].umount_wait()
3194 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolumes
[i
], auth_id
, "--group_name", group
)
3195 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolumes
[i
], "--group_name", group
)
3196 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3198 def test_subvolume_pin_random(self
):
3199 self
.fs
.set_max_mds(2)
3200 self
.fs
.wait_for_daemons()
3201 self
.config_set('mds', 'mds_export_ephemeral_random', True)
3203 subvolume
= self
._generate
_random
_subvolume
_name
()
3204 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3205 self
._fs
_cmd
("subvolume", "pin", self
.volname
, subvolume
, "random", ".01")
3209 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3211 # verify trash dir is clean
3212 self
._wait
_for
_trash
_empty
()
3214 def test_subvolume_resize_fail_invalid_size(self
):
3216 That a subvolume cannot be resized to an invalid size and the quota did not change
3219 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3221 subvolname
= self
._generate
_random
_subvolume
_name
()
3222 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3224 # make sure it exists
3225 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3226 self
.assertNotEqual(subvolpath
, None)
3228 # try to resize the subvolume with an invalid size -10
3231 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3232 except CommandFailedError
as ce
:
3233 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3235 self
.fail("expected the 'fs subvolume resize' command to fail")
3237 # verify the quota did not change
3238 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3239 self
.assertEqual(size
, osize
)
3242 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3244 # verify trash dir is clean
3245 self
._wait
_for
_trash
_empty
()
3247 def test_subvolume_resize_fail_zero_size(self
):
3249 That a subvolume cannot be resized to a zero size and the quota did not change
3252 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3254 subvolname
= self
._generate
_random
_subvolume
_name
()
3255 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3257 # make sure it exists
3258 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3259 self
.assertNotEqual(subvolpath
, None)
3261 # try to resize the subvolume with size 0
3264 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3265 except CommandFailedError
as ce
:
3266 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3268 self
.fail("expected the 'fs subvolume resize' command to fail")
3270 # verify the quota did not change
3271 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3272 self
.assertEqual(size
, osize
)
3275 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3277 # verify trash dir is clean
3278 self
._wait
_for
_trash
_empty
()
3280 def test_subvolume_resize_quota_lt_used_size(self
):
3282 That a subvolume can be resized to a size smaller than the current used size
3283 and the resulting quota matches the expected size.
3286 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
3288 subvolname
= self
._generate
_random
_subvolume
_name
()
3289 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3291 # make sure it exists
3292 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3293 self
.assertNotEqual(subvolpath
, None)
3295 # create one file of 10MB
3296 file_size
=self
.DEFAULT_FILE_SIZE
*10
3298 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3301 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+1)
3302 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3304 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
3305 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
3306 if isinstance(self
.mount_a
, FuseMount
):
3307 # kclient dir does not have size==rbytes
3308 self
.assertEqual(usedsize
, susedsize
)
3310 # shrink the subvolume
3311 nsize
= usedsize
// 2
3313 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3314 except CommandFailedError
:
3315 self
.fail("expected the 'fs subvolume resize' command to succeed")
3318 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3319 self
.assertEqual(size
, nsize
)
3322 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3324 # verify trash dir is clean
3325 self
._wait
_for
_trash
_empty
()
3327 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self
):
3329 That a subvolume cannot be resized to a size smaller than the current used size
3330 when --no_shrink is given and the quota did not change.
3333 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
3335 subvolname
= self
._generate
_random
_subvolume
_name
()
3336 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3338 # make sure it exists
3339 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3340 self
.assertNotEqual(subvolpath
, None)
3342 # create one file of 10MB
3343 file_size
=self
.DEFAULT_FILE_SIZE
*10
3345 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3348 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+2)
3349 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3351 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
3352 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
3353 if isinstance(self
.mount_a
, FuseMount
):
3354 # kclient dir does not have size==rbytes
3355 self
.assertEqual(usedsize
, susedsize
)
3357 # shrink the subvolume
3358 nsize
= usedsize
// 2
3360 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
), "--no_shrink")
3361 except CommandFailedError
as ce
:
3362 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3364 self
.fail("expected the 'fs subvolume resize' command to fail")
3366 # verify the quota did not change
3367 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3368 self
.assertEqual(size
, osize
)
3371 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3373 # verify trash dir is clean
3374 self
._wait
_for
_trash
_empty
()
3376 def test_subvolume_resize_expand_on_full_subvolume(self
):
3378 That the subvolume can be expanded from a full subvolume and future writes succeed.
3381 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*10
3382 # create subvolume of quota 10MB and make sure it exists
3383 subvolname
= self
._generate
_random
_subvolume
_name
()
3384 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3385 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3386 self
.assertNotEqual(subvolpath
, None)
3388 # create one file of size 10MB and write
3389 file_size
=self
.DEFAULT_FILE_SIZE
*10
3391 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3394 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+3)
3395 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3397 # create a file of size 5MB and try write more
3398 file_size
=file_size
// 2
3400 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3403 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+4)
3405 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3406 except CommandFailedError
:
3407 # Not able to write. So expand the subvolume more and try writing the 5MB file again
3409 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3411 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3412 except CommandFailedError
:
3413 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3414 "to succeed".format(subvolname
, number_of_files
, file_size
))
3416 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3417 "to fail".format(subvolname
, number_of_files
, file_size
))
3420 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3422 # verify trash dir is clean
3423 self
._wait
_for
_trash
_empty
()
3425 def test_subvolume_resize_infinite_size(self
):
3427 That a subvolume can be resized to an infinite size by unsetting its quota.
3431 subvolname
= self
._generate
_random
_subvolume
_name
()
3432 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
3433 str(self
.DEFAULT_FILE_SIZE
*1024*1024))
3435 # make sure it exists
3436 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3437 self
.assertNotEqual(subvolpath
, None)
3440 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
3442 # verify that the quota is None
3443 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
3444 self
.assertEqual(size
, None)
3447 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3449 # verify trash dir is clean
3450 self
._wait
_for
_trash
_empty
()
3452 def test_subvolume_resize_infinite_size_future_writes(self
):
3454 That a subvolume can be resized to an infinite size and the future writes succeed.
3458 subvolname
= self
._generate
_random
_subvolume
_name
()
3459 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
3460 str(self
.DEFAULT_FILE_SIZE
*1024*1024*5), "--mode=777")
3462 # make sure it exists
3463 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3464 self
.assertNotEqual(subvolpath
, None)
3467 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
3469 # verify that the quota is None
3470 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
3471 self
.assertEqual(size
, None)
3473 # create one file of 10MB and try to write
3474 file_size
=self
.DEFAULT_FILE_SIZE
*10
3476 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3479 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+5)
3482 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3483 except CommandFailedError
:
3484 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3485 "to succeed".format(subvolname
, number_of_files
, file_size
))
3488 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3490 # verify trash dir is clean
3491 self
._wait
_for
_trash
_empty
()
3493 def test_subvolume_rm_force(self
):
3494 # test removing non-existing subvolume with --force
3495 subvolume
= self
._generate
_random
_subvolume
_name
()
3497 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
3498 except CommandFailedError
:
3499 self
.fail("expected the 'fs subvolume rm --force' command to succeed")
3501 def test_subvolume_exists_with_subvolumegroup_and_subvolume(self
):
3502 """Test the presence of any subvolume by specifying the name of subvolumegroup"""
3504 group
= self
._generate
_random
_group
_name
()
3505 subvolume1
= self
._generate
_random
_subvolume
_name
()
3506 # create subvolumegroup
3507 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3508 # create subvolume in group
3509 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3510 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3511 self
.assertEqual(ret
.strip('\n'), "subvolume exists")
3512 # delete subvolume in group
3513 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3514 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3515 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3516 # delete subvolumegroup
3517 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3519 def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self
):
3520 """Test the presence of any subvolume specifying the name
3521 of subvolumegroup and no subvolumes"""
3523 group
= self
._generate
_random
_group
_name
()
3524 # create subvolumegroup
3525 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3526 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3527 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3528 # delete subvolumegroup
3529 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3531 def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self
):
3532 """Test the presence of any subvolume without specifying the name
3533 of subvolumegroup"""
3535 subvolume1
= self
._generate
_random
_subvolume
_name
()
3537 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
)
3538 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3539 self
.assertEqual(ret
.strip('\n'), "subvolume exists")
3541 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
3542 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3543 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3545 def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self
):
3546 """Test the presence of any subvolume without any subvolumegroup
3547 and without any subvolume"""
3549 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3550 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3552 def test_subvolume_shrink(self
):
3554 That a subvolume can be shrinked in size and its quota matches the expected size.
3558 subvolname
= self
._generate
_random
_subvolume
_name
()
3559 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3560 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3562 # make sure it exists
3563 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3564 self
.assertNotEqual(subvolpath
, None)
3566 # shrink the subvolume
3568 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3571 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3572 self
.assertEqual(size
, nsize
)
3575 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3577 # verify trash dir is clean
3578 self
._wait
_for
_trash
_empty
()
3580 def test_subvolume_retain_snapshot_rm_idempotency(self
):
3582 ensure subvolume deletion of a subvolume which is already deleted with retain snapshots option passes.
3583 After subvolume deletion with retain snapshots, the subvolume exists until the trash directory (resides inside subvolume)
3584 is cleaned up. The subvolume deletion issued while the trash directory is not empty, should pass and should
3585 not error out with EAGAIN.
3587 subvolume
= self
._generate
_random
_subvolume
_name
()
3588 snapshot
= self
._generate
_random
_snapshot
_name
()
3591 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
3594 self
._do
_subvolume
_io
(subvolume
, number_of_files
=256)
3596 # snapshot subvolume
3597 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3599 # remove with snapshot retention
3600 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3602 # remove snapshots (removes retained volume)
3603 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3605 # remove subvolume (check idempotency)
3607 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3608 except CommandFailedError
as ce
:
3609 if ce
.exitstatus
!= errno
.ENOENT
:
3610 self
.fail(f
"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
3612 # verify trash dir is clean
3613 self
._wait
_for
_trash
_empty
()
3616 def test_subvolume_user_metadata_set(self
):
3617 subvolname
= self
._generate
_random
_subvolume
_name
()
3618 group
= self
._generate
_random
_group
_name
()
3621 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3623 # create subvolume in group.
3624 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3626 # set metadata for subvolume.
3630 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3631 except CommandFailedError
:
3632 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3634 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3635 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3637 # verify trash dir is clean.
3638 self
._wait
_for
_trash
_empty
()
3640 def test_subvolume_user_metadata_set_idempotence(self
):
3641 subvolname
= self
._generate
_random
_subvolume
_name
()
3642 group
= self
._generate
_random
_group
_name
()
3645 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3647 # create subvolume in group.
3648 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3650 # set metadata for subvolume.
3654 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3655 except CommandFailedError
:
3656 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3658 # set same metadata again for subvolume.
3660 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3661 except CommandFailedError
:
3662 self
.fail("expected the 'fs subvolume metadata set' command to succeed because it is idempotent operation")
3664 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3665 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3667 # verify trash dir is clean.
3668 self
._wait
_for
_trash
_empty
()
3670 def test_subvolume_user_metadata_get(self
):
3671 subvolname
= self
._generate
_random
_subvolume
_name
()
3672 group
= self
._generate
_random
_group
_name
()
3675 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3677 # create subvolume in group.
3678 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3680 # set metadata for subvolume.
3683 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3685 # get value for specified key.
3687 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3688 except CommandFailedError
:
3689 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3691 # remove '\n' from returned value.
3692 ret
= ret
.strip('\n')
3694 # match received value with expected value.
3695 self
.assertEqual(value
, ret
)
3697 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3698 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3700 # verify trash dir is clean.
3701 self
._wait
_for
_trash
_empty
()
3703 def test_subvolume_user_metadata_get_for_nonexisting_key(self
):
3704 subvolname
= self
._generate
_random
_subvolume
_name
()
3705 group
= self
._generate
_random
_group
_name
()
3708 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3710 # create subvolume in group.
3711 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3713 # set metadata for subvolume.
3716 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3718 # try to get value for nonexisting key
3719 # Expecting ENOENT exit status because key does not exist
3721 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
3722 except CommandFailedError
as e
:
3723 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3725 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3727 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3728 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3730 # verify trash dir is clean.
3731 self
._wait
_for
_trash
_empty
()
3733 def test_subvolume_user_metadata_get_for_nonexisting_section(self
):
3734 subvolname
= self
._generate
_random
_subvolume
_name
()
3735 group
= self
._generate
_random
_group
_name
()
3738 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3740 # create subvolume in group.
3741 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3743 # try to get value for nonexisting key (as section does not exist)
3744 # Expecting ENOENT exit status because key does not exist
3746 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key", "--group_name", group
)
3747 except CommandFailedError
as e
:
3748 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3750 self
.fail("Expected ENOENT because section does not exist")
3752 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3753 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3755 # verify trash dir is clean.
3756 self
._wait
_for
_trash
_empty
()
3758 def test_subvolume_user_metadata_update(self
):
3759 subvolname
= self
._generate
_random
_subvolume
_name
()
3760 group
= self
._generate
_random
_group
_name
()
3763 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3765 # create subvolume in group.
3766 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3768 # set metadata for subvolume.
3771 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3773 # update metadata against key.
3774 new_value
= "new_value"
3775 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, new_value
, "--group_name", group
)
3777 # get metadata for specified key of subvolume.
3779 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3780 except CommandFailedError
:
3781 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3783 # remove '\n' from returned value.
3784 ret
= ret
.strip('\n')
3786 # match received value with expected value.
3787 self
.assertEqual(new_value
, ret
)
3789 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3790 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3792 # verify trash dir is clean.
3793 self
._wait
_for
_trash
_empty
()
3795 def test_subvolume_user_metadata_list(self
):
3796 subvolname
= self
._generate
_random
_subvolume
_name
()
3797 group
= self
._generate
_random
_group
_name
()
3800 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3802 # create subvolume in group.
3803 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3805 # set metadata for subvolume.
3806 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
3808 for k
, v
in input_metadata_dict
.items():
3809 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
3813 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
3814 except CommandFailedError
:
3815 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
3817 ret_dict
= json
.loads(ret
)
3819 # compare output with expected output
3820 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
3822 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3823 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3825 # verify trash dir is clean.
3826 self
._wait
_for
_trash
_empty
()
3828 def test_subvolume_user_metadata_list_if_no_metadata_set(self
):
3829 subvolname
= self
._generate
_random
_subvolume
_name
()
3830 group
= self
._generate
_random
_group
_name
()
3833 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3835 # create subvolume in group.
3836 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3840 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
3841 except CommandFailedError
:
3842 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
3844 # remove '\n' from returned value.
3845 ret
= ret
.strip('\n')
3847 # compare output with expected output
3848 # expecting empty json/dictionary
3849 self
.assertEqual(ret
, "{}")
3851 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3852 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3854 # verify trash dir is clean.
3855 self
._wait
_for
_trash
_empty
()
3857 def test_subvolume_user_metadata_remove(self
):
3858 subvolname
= self
._generate
_random
_subvolume
_name
()
3859 group
= self
._generate
_random
_group
_name
()
3862 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3864 # create subvolume in group.
3865 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3867 # set metadata for subvolume.
3870 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3872 # remove metadata against specified key.
3874 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
3875 except CommandFailedError
:
3876 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
3878 # confirm key is removed by again fetching metadata
3880 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3881 except CommandFailedError
as e
:
3882 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3884 self
.fail("Expected ENOENT because key does not exist")
3886 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3887 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3889 # verify trash dir is clean.
3890 self
._wait
_for
_trash
_empty
()
3892 def test_subvolume_user_metadata_remove_for_nonexisting_key(self
):
3893 subvolname
= self
._generate
_random
_subvolume
_name
()
3894 group
= self
._generate
_random
_group
_name
()
3897 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3899 # create subvolume in group.
3900 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3902 # set metadata for subvolume.
3905 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3907 # try to remove value for nonexisting key
3908 # Expecting ENOENT exit status because key does not exist
3910 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
3911 except CommandFailedError
as e
:
3912 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3914 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3916 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3917 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3919 # verify trash dir is clean.
3920 self
._wait
_for
_trash
_empty
()
3922 def test_subvolume_user_metadata_remove_for_nonexisting_section(self
):
3923 subvolname
= self
._generate
_random
_subvolume
_name
()
3924 group
= self
._generate
_random
_group
_name
()
3927 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3929 # create subvolume in group.
3930 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3932 # try to remove value for nonexisting key (as section does not exist)
3933 # Expecting ENOENT exit status because key does not exist
3935 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key", "--group_name", group
)
3936 except CommandFailedError
as e
:
3937 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3939 self
.fail("Expected ENOENT because section does not exist")
3941 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3942 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3944 # verify trash dir is clean.
3945 self
._wait
_for
_trash
_empty
()
3947 def test_subvolume_user_metadata_remove_force(self
):
3948 subvolname
= self
._generate
_random
_subvolume
_name
()
3949 group
= self
._generate
_random
_group
_name
()
3952 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3954 # create subvolume in group.
3955 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3957 # set metadata for subvolume.
3960 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3962 # remove metadata against specified key with --force option.
3964 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
, "--force")
3965 except CommandFailedError
:
3966 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
3968 # confirm key is removed by again fetching metadata
3970 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3971 except CommandFailedError
as e
:
3972 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3974 self
.fail("Expected ENOENT because key does not exist")
3976 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3977 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3979 # verify trash dir is clean.
3980 self
._wait
_for
_trash
_empty
()
3982 def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self
):
3983 subvolname
= self
._generate
_random
_subvolume
_name
()
3984 group
= self
._generate
_random
_group
_name
()
3987 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3989 # create subvolume in group.
3990 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3992 # set metadata for subvolume.
3995 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3997 # remove metadata against specified key.
3999 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
4000 except CommandFailedError
:
4001 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
4003 # confirm key is removed by again fetching metadata
4005 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
4006 except CommandFailedError
as e
:
4007 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4009 self
.fail("Expected ENOENT because key does not exist")
4011 # again remove metadata against already removed key with --force option.
4013 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
, "--force")
4014 except CommandFailedError
:
4015 self
.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")
4017 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4018 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4020 # verify trash dir is clean.
4021 self
._wait
_for
_trash
_empty
()
4023 def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self
):
4024 subvolname
= self
._generate
_random
_subvolume
_name
()
4025 group
= self
._generate
_random
_group
_name
()
4027 # emulate a old-fashioned subvolume in a custom group
4028 createpath
= os
.path
.join(".", "volumes", group
, subvolname
)
4029 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', createpath
], omit_sudo
=False)
4031 # set metadata for subvolume.
4035 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
4036 except CommandFailedError
:
4037 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
4039 # get value for specified key.
4041 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
4042 except CommandFailedError
:
4043 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
4045 # remove '\n' from returned value.
4046 ret
= ret
.strip('\n')
4048 # match received value with expected value.
4049 self
.assertEqual(value
, ret
)
4051 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4052 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4054 # verify trash dir is clean.
4055 self
._wait
_for
_trash
_empty
()
4057 def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self
):
4058 subvolname
= self
._generate
_random
_subvolume
_name
()
4059 group
= self
._generate
_random
_group
_name
()
4061 # emulate a old-fashioned subvolume in a custom group
4062 createpath
= os
.path
.join(".", "volumes", group
, subvolname
)
4063 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', createpath
], omit_sudo
=False)
4065 # set metadata for subvolume.
4066 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
4068 for k
, v
in input_metadata_dict
.items():
4069 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
4073 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
4074 except CommandFailedError
:
4075 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
4077 ret_dict
= json
.loads(ret
)
4079 # compare output with expected output
4080 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
4082 # remove metadata against specified key.
4084 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_1", "--group_name", group
)
4085 except CommandFailedError
:
4086 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
4088 # confirm key is removed by again fetching metadata
4090 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_1", "--group_name", group
)
4091 except CommandFailedError
as e
:
4092 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4094 self
.fail("Expected ENOENT because key_1 does not exist")
4096 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4097 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4099 # verify trash dir is clean.
4100 self
._wait
_for
_trash
_empty
()
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations.

    NOTE(review): most cases here are skipped; group snapshot create is
    expected to return ENOSYS (see
    test_subvolume_group_snapshot_unsupported_status), which suggests the
    feature is currently disabled -- confirm against the volumes plugin.
    """

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        """Removing an already-removed group snapshot must fail with ENOENT."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # removing the same snapshot again must fail with ENOENT
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        """Basic create/remove cycle for a subvolume group snapshot."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        """Creating a group snapshot twice with the same name is idempotent."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        """'fs subvolumegroup snapshot ls' lists exactly the created snapshots."""
        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        """'snapshot rm --force' on a non-existent group snapshot must succeed."""
        # test removing non-existing subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # remove snapshot
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        """Group snapshot creation is unsupported and must return ENOSYS."""
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # attempt to snapshot the group -- expected to be rejected
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4250 class TestSubvolumeSnapshots(TestVolumesHelper
):
4251 """Tests for FS subvolume snapshot operations."""
4252 def test_nonexistent_subvolume_snapshot_rm(self
):
4253 subvolume
= self
._generate
_random
_subvolume
_name
()
4254 snapshot
= self
._generate
_random
_snapshot
_name
()
4257 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4259 # snapshot subvolume
4260 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4263 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4265 # remove snapshot again
4267 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4268 except CommandFailedError
as ce
:
4269 if ce
.exitstatus
!= errno
.ENOENT
:
4272 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
4275 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4277 # verify trash dir is clean
4278 self
._wait
_for
_trash
_empty
()
4280 def test_subvolume_snapshot_create_and_rm(self
):
4281 subvolume
= self
._generate
_random
_subvolume
_name
()
4282 snapshot
= self
._generate
_random
_snapshot
_name
()
4285 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4287 # snapshot subvolume
4288 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4291 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4294 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4296 # verify trash dir is clean
4297 self
._wait
_for
_trash
_empty
()
4299 def test_subvolume_snapshot_create_idempotence(self
):
4300 subvolume
= self
._generate
_random
_subvolume
_name
()
4301 snapshot
= self
._generate
_random
_snapshot
_name
()
4304 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4306 # snapshot subvolume
4307 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4309 # try creating w/ same subvolume snapshot name -- should be idempotent
4310 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4313 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4316 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4318 # verify trash dir is clean
4319 self
._wait
_for
_trash
_empty
()
4321 def test_subvolume_snapshot_info(self
):
4324 tests the 'fs subvolume snapshot info' command
4327 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4329 subvolume
= self
._generate
_random
_subvolume
_name
()
4330 snapshot
, snap_missing
= self
._generate
_random
_snapshot
_name
(2)
4333 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4336 self
._do
_subvolume
_io
(subvolume
, number_of_files
=1)
4338 # snapshot subvolume
4339 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4341 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
4343 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4344 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4346 # snapshot info for non-existent snapshot
4348 self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snap_missing
)
4349 except CommandFailedError
as ce
:
4350 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot info of non-existent snapshot")
4352 self
.fail("expected snapshot info of non-existent snapshot to fail")
4355 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4358 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4360 # verify trash dir is clean
4361 self
._wait
_for
_trash
_empty
()
4363 def test_subvolume_snapshot_in_group(self
):
4364 subvolume
= self
._generate
_random
_subvolume
_name
()
4365 group
= self
._generate
_random
_group
_name
()
4366 snapshot
= self
._generate
_random
_snapshot
_name
()
4369 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4371 # create subvolume in group
4372 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4374 # snapshot subvolume in group
4375 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
4378 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
4381 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4383 # verify trash dir is clean
4384 self
._wait
_for
_trash
_empty
()
4387 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4389 def test_subvolume_snapshot_ls(self
):
4390 # tests the 'fs subvolume snapshot ls' command
4395 subvolume
= self
._generate
_random
_subvolume
_name
()
4396 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4398 # create subvolume snapshots
4399 snapshots
= self
._generate
_random
_snapshot
_name
(3)
4400 for snapshot
in snapshots
:
4401 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4403 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
4404 if len(subvolsnapshotls
) == 0:
4405 self
.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
4407 snapshotnames
= [snapshot
['name'] for snapshot
in subvolsnapshotls
]
4408 if collections
.Counter(snapshotnames
) != collections
.Counter(snapshots
):
4409 self
.fail("Error creating or listing subvolume snapshots")
4412 for snapshot
in snapshots
:
4413 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4416 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4418 # verify trash dir is clean
4419 self
._wait
_for
_trash
_empty
()
4421 def test_subvolume_inherited_snapshot_ls(self
):
4422 # tests the scenario where 'fs subvolume snapshot ls' command
4423 # should not list inherited snapshots created as part of snapshot
4424 # at ancestral level
4427 subvolume
= self
._generate
_random
_subvolume
_name
()
4428 group
= self
._generate
_random
_group
_name
()
4432 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4434 # create subvolume in group
4435 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4437 # create subvolume snapshots
4438 snapshots
= self
._generate
_random
_snapshot
_name
(snap_count
)
4439 for snapshot
in snapshots
:
4440 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
4442 # Create snapshot at ancestral level
4443 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", "ancestral_snap_1")
4444 ancestral_snappath2
= os
.path
.join(".", "volumes", group
, ".snap", "ancestral_snap_2")
4445 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1
, ancestral_snappath2
], omit_sudo
=False)
4447 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
, group
))
4448 self
.assertEqual(len(subvolsnapshotls
), snap_count
)
4450 # remove ancestral snapshots
4451 self
.mount_a
.run_shell(['sudo', 'rmdir', ancestral_snappath1
, ancestral_snappath2
], omit_sudo
=False)
4454 for snapshot
in snapshots
:
4455 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
4458 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4460 # verify trash dir is clean
4461 self
._wait
_for
_trash
_empty
()
4464 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4466 def test_subvolume_inherited_snapshot_info(self
):
4468 tests the scenario where 'fs subvolume snapshot info' command
4469 should fail for inherited snapshots created as part of snapshot
4473 subvolume
= self
._generate
_random
_subvolume
_name
()
4474 group
= self
._generate
_random
_group
_name
()
4477 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4479 # create subvolume in group
4480 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4482 # Create snapshot at ancestral level
4483 ancestral_snap_name
= "ancestral_snap_1"
4484 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", ancestral_snap_name
)
4485 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1
], omit_sudo
=False)
4487 # Validate existence of inherited snapshot
4488 group_path
= os
.path
.join(".", "volumes", group
)
4489 inode_number_group_dir
= int(self
.mount_a
.run_shell(['stat', '-c' '%i', group_path
]).stdout
.getvalue().strip())
4490 inherited_snap
= "_{0}_{1}".format(ancestral_snap_name
, inode_number_group_dir
)
4491 inherited_snappath
= os
.path
.join(".", "volumes", group
, subvolume
,".snap", inherited_snap
)
4492 self
.mount_a
.run_shell(['ls', inherited_snappath
])
4494 # snapshot info on inherited snapshot
4496 self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, inherited_snap
, group
)
4497 except CommandFailedError
as ce
:
4498 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on snapshot info of inherited snapshot")
4500 self
.fail("expected snapshot info of inherited snapshot to fail")
4502 # remove ancestral snapshots
4503 self
.mount_a
.run_shell(['sudo', 'rmdir', ancestral_snappath1
], omit_sudo
=False)
4506 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
4508 # verify trash dir is clean
4509 self
._wait
_for
_trash
_empty
()
4512 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4514 def test_subvolume_inherited_snapshot_rm(self
):
4516 tests the scenario where 'fs subvolume snapshot rm' command
4517 should fail for inherited snapshots created as part of snapshot
4521 subvolume
= self
._generate
_random
_subvolume
_name
()
4522 group
= self
._generate
_random
_group
_name
()
4525 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4527 # create subvolume in group
4528 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4530 # Create snapshot at ancestral level
4531 ancestral_snap_name
= "ancestral_snap_1"
4532 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", ancestral_snap_name
)
4533 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1
], omit_sudo
=False)
4535 # Validate existence of inherited snap
4536 group_path
= os
.path
.join(".", "volumes", group
)
4537 inode_number_group_dir
= int(self
.mount_a
.run_shell(['stat', '-c' '%i', group_path
]).stdout
.getvalue().strip())
4538 inherited_snap
= "_{0}_{1}".format(ancestral_snap_name
, inode_number_group_dir
)
4539 inherited_snappath
= os
.path
.join(".", "volumes", group
, subvolume
,".snap", inherited_snap
)
4540 self
.mount_a
.run_shell(['ls', inherited_snappath
])
4542 # inherited snapshot should not be deletable
4544 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, inherited_snap
, "--group_name", group
)
4545 except CommandFailedError
as ce
:
4546 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, msg
="invalid error code when removing inherited snapshot")
4548 self
.fail("expected removing inheirted snapshot to fail")
4550 # remove ancestral snapshots
4551 self
.mount_a
.run_shell(['sudo', 'rmdir', ancestral_snappath1
], omit_sudo
=False)
4554 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4556 # verify trash dir is clean
4557 self
._wait
_for
_trash
_empty
()
4560 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4562 def test_subvolume_subvolumegroup_snapshot_name_conflict(self
):
4564 tests the scenario where creation of subvolume snapshot name
4565 with same name as it's subvolumegroup snapshot name. This should
4569 subvolume
= self
._generate
_random
_subvolume
_name
()
4570 group
= self
._generate
_random
_group
_name
()
4571 group_snapshot
= self
._generate
_random
_snapshot
_name
()
4574 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4576 # create subvolume in group
4577 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4579 # Create subvolumegroup snapshot
4580 group_snapshot_path
= os
.path
.join(".", "volumes", group
, ".snap", group_snapshot
)
4581 self
.mount_a
.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path
], omit_sudo
=False)
4583 # Validate existence of subvolumegroup snapshot
4584 self
.mount_a
.run_shell(['ls', group_snapshot_path
])
4586 # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail
4588 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, group_snapshot
, "--group_name", group
)
4589 except CommandFailedError
as ce
:
4590 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, msg
="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
4592 self
.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
4594 # remove subvolumegroup snapshot
4595 self
.mount_a
.run_shell(['sudo', 'rmdir', group_snapshot_path
], omit_sudo
=False)
4598 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4600 # verify trash dir is clean
4601 self
._wait
_for
_trash
_empty
()
4604 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4606 def test_subvolume_retain_snapshot_invalid_recreate(self
):
4608 ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
4610 subvolume
= self
._generate
_random
_subvolume
_name
()
4611 snapshot
= self
._generate
_random
_snapshot
_name
()
4614 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4616 # snapshot subvolume
4617 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4619 # remove with snapshot retention
4620 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4622 # recreate subvolume with an invalid pool
4623 data_pool
= "invalid_pool"
4625 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--pool_layout", data_pool
)
4626 except CommandFailedError
as ce
:
4627 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on recreate of subvolume with invalid poolname")
4629 self
.fail("expected recreate of subvolume with invalid poolname to fail")
4632 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4633 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4634 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4638 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
4639 except CommandFailedError
as ce
:
4640 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
4642 self
.fail("expected getpath of subvolume with retained snapshots to fail")
4644 # remove snapshot (should remove volume)
4645 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4647 # verify trash dir is clean
4648 self
._wait
_for
_trash
_empty
()
4650 def test_subvolume_retain_snapshot_recreate_subvolume(self
):
4652 ensure a retained subvolume can be recreated and further snapshotted
4654 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4656 subvolume
= self
._generate
_random
_subvolume
_name
()
4657 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
4660 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4662 # snapshot subvolume
4663 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
4665 # remove with snapshot retention
4666 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4669 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4670 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4671 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4673 # recreate retained subvolume
4674 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4677 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4678 self
.assertEqual(subvol_info
["state"], "complete",
4679 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4681 # snapshot info (older snapshot)
4682 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot1
))
4684 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4685 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4687 # snap-create (new snapshot)
4688 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot2
)
4690 # remove with retain snapshots
4691 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4694 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
4695 self
.assertEqual(len(subvolsnapshotls
), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
4696 " created subvolume snapshots")
4697 snapshotnames
= [snapshot
['name'] for snapshot
in subvolsnapshotls
]
4698 for snap
in [snapshot1
, snapshot2
]:
4699 self
.assertIn(snap
, snapshotnames
, "Missing snapshot '{0}' in snapshot list".format(snap
))
4701 # remove snapshots (should remove volume)
4702 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
4703 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot2
)
4705 # verify list subvolumes returns an empty list
4706 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4707 self
.assertEqual(len(subvolumels
), 0)
4709 # verify trash dir is clean
4710 self
._wait
_for
_trash
_empty
()
4712 def test_subvolume_retain_snapshot_with_snapshots(self
):
4714 ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
4715 also test allowed and dis-allowed operations on a retained subvolume
4717 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4719 subvolume
= self
._generate
_random
_subvolume
_name
()
4720 snapshot
= self
._generate
_random
_snapshot
_name
()
4723 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4725 # snapshot subvolume
4726 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4728 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4730 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4731 except CommandFailedError
as ce
:
4732 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of retained subvolume with snapshots")
4734 self
.fail("expected rm of subvolume with retained snapshots to fail")
4736 # remove with snapshot retention
4737 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4740 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4741 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4742 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4744 ## test allowed ops in retained state
4746 subvolumes
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4747 self
.assertEqual(len(subvolumes
), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes
)))
4748 self
.assertEqual(subvolumes
[0]['name'], subvolume
,
4749 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume
, subvolumes
[0]['name']))
4752 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
4754 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4755 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4757 # rm --force (allowed but should fail)
4759 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
4760 except CommandFailedError
as ce
:
4761 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
4763 self
.fail("expected rm of subvolume with retained snapshots to fail")
4765 # rm (allowed but should fail)
4767 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4768 except CommandFailedError
as ce
:
4769 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
4771 self
.fail("expected rm of subvolume with retained snapshots to fail")
4773 ## test disallowed ops
4776 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
4777 except CommandFailedError
as ce
:
4778 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
4780 self
.fail("expected getpath of subvolume with retained snapshots to fail")
4783 nsize
= self
.DEFAULT_FILE_SIZE
*1024*1024
4785 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolume
, str(nsize
))
4786 except CommandFailedError
as ce
:
4787 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on resize of subvolume with retained snapshots")
4789 self
.fail("expected resize of subvolume with retained snapshots to fail")
4793 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, "fail")
4794 except CommandFailedError
as ce
:
4795 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot create of subvolume with retained snapshots")
4797 self
.fail("expected snapshot create of subvolume with retained snapshots to fail")
4799 # remove snapshot (should remove volume)
4800 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4802 # verify list subvolumes returns an empty list
4803 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4804 self
.assertEqual(len(subvolumels
), 0)
4806 # verify trash dir is clean
4807 self
._wait
_for
_trash
_empty
()
4809 def test_subvolume_retain_snapshot_without_snapshots(self
):
4811 ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
4813 subvolume
= self
._generate
_random
_subvolume
_name
()
4816 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4818 # remove with snapshot retention (should remove volume, no snapshots to retain)
4819 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4821 # verify list subvolumes returns an empty list
4822 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4823 self
.assertEqual(len(subvolumels
), 0)
4825 # verify trash dir is clean
4826 self
._wait
_for
_trash
_empty
()
4828 def test_subvolume_retain_snapshot_trash_busy_recreate(self
):
4830 ensure retained subvolume recreate fails if its trash is not yet purged
4832 subvolume
= self
._generate
_random
_subvolume
_name
()
4833 snapshot
= self
._generate
_random
_snapshot
_name
()
4836 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4838 # snapshot subvolume
4839 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4841 # remove with snapshot retention
4842 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4844 # fake a trash entry
4845 self
._update
_fake
_trash
(subvolume
)
4847 # recreate subvolume
4849 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4850 except CommandFailedError
as ce
:
4851 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of subvolume with purge pending")
4853 self
.fail("expected recreate of subvolume with purge pending to fail")
4855 # clear fake trash entry
4856 self
._update
_fake
_trash
(subvolume
, create
=False)
4858 # recreate subvolume
4859 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4862 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4865 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4867 # verify trash dir is clean
4868 self
._wait
_for
_trash
_empty
()
4870 def test_subvolume_rm_with_snapshots(self
):
4871 subvolume
= self
._generate
_random
_subvolume
_name
()
4872 snapshot
= self
._generate
_random
_snapshot
_name
()
4875 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4877 # snapshot subvolume
4878 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4880 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4882 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4883 except CommandFailedError
as ce
:
4884 if ce
.exitstatus
!= errno
.ENOTEMPTY
:
4885 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
4887 raise RuntimeError("expected subvolume deletion to fail")
4890 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4893 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4895 # verify trash dir is clean
4896 self
._wait
_for
_trash
_empty
()
4898 def test_subvolume_snapshot_protect_unprotect_sanity(self
):
4900 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
4901 invoking the command does not cause errors, till they are removed from a subsequent release.
4903 subvolume
= self
._generate
_random
_subvolume
_name
()
4904 snapshot
= self
._generate
_random
_snapshot
_name
()
4905 clone
= self
._generate
_random
_clone
_name
()
4908 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4911 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
4913 # snapshot subvolume
4914 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4916 # now, protect snapshot
4917 self
._fs
_cmd
("subvolume", "snapshot", "protect", self
.volname
, subvolume
, snapshot
)
4920 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4922 # check clone status
4923 self
._wait
_for
_clone
_to
_complete
(clone
)
4925 # now, unprotect snapshot
4926 self
._fs
_cmd
("subvolume", "snapshot", "unprotect", self
.volname
, subvolume
, snapshot
)
4929 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4932 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4935 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4936 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4938 # verify trash dir is clean
4939 self
._wait
_for
_trash
_empty
()
4941 def test_subvolume_snapshot_rm_force(self
):
4942 # test removing non existing subvolume snapshot with --force
4943 subvolume
= self
._generate
_random
_subvolume
_name
()
4944 snapshot
= self
._generate
_random
_snapshot
_name
()
4948 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, "--force")
4949 except CommandFailedError
:
4950 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
4952 def test_subvolume_snapshot_metadata_set(self
):
4954 Set custom metadata for subvolume snapshot.
4956 subvolname
= self
._generate
_random
_subvolume
_name
()
4957 group
= self
._generate
_random
_group
_name
()
4958 snapshot
= self
._generate
_random
_snapshot
_name
()
4961 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4963 # create subvolume in group.
4964 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4966 # snapshot subvolume
4967 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4969 # set metadata for snapshot.
4973 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4974 except CommandFailedError
:
4975 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
4977 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4978 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4979 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4981 # verify trash dir is clean.
4982 self
._wait
_for
_trash
_empty
()
4984 def test_subvolume_snapshot_metadata_set_idempotence(self
):
4986 Set custom metadata for subvolume snapshot (Idempotency).
4988 subvolname
= self
._generate
_random
_subvolume
_name
()
4989 group
= self
._generate
_random
_group
_name
()
4990 snapshot
= self
._generate
_random
_snapshot
_name
()
4993 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4995 # create subvolume in group.
4996 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4998 # snapshot subvolume
4999 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5001 # set metadata for snapshot.
5005 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5006 except CommandFailedError
:
5007 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5009 # set same metadata again for subvolume.
5011 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5012 except CommandFailedError
:
5013 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is idempotent operation")
5015 # get value for specified key.
5017 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5018 except CommandFailedError
:
5019 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5021 # remove '\n' from returned value.
5022 ret
= ret
.strip('\n')
5024 # match received value with expected value.
5025 self
.assertEqual(value
, ret
)
5027 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5028 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5029 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5031 # verify trash dir is clean.
5032 self
._wait
_for
_trash
_empty
()
5034 def test_subvolume_snapshot_metadata_get(self
):
5036 Get custom metadata for a specified key in subvolume snapshot metadata.
5038 subvolname
= self
._generate
_random
_subvolume
_name
()
5039 group
= self
._generate
_random
_group
_name
()
5040 snapshot
= self
._generate
_random
_snapshot
_name
()
5043 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5045 # create subvolume in group.
5046 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5048 # snapshot subvolume
5049 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5051 # set metadata for snapshot.
5054 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5056 # get value for specified key.
5058 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5059 except CommandFailedError
:
5060 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5062 # remove '\n' from returned value.
5063 ret
= ret
.strip('\n')
5065 # match received value with expected value.
5066 self
.assertEqual(value
, ret
)
5068 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5069 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5070 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5072 # verify trash dir is clean.
5073 self
._wait
_for
_trash
_empty
()
5075 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self
):
5077 Get custom metadata for subvolume snapshot if specified key not exist in metadata.
5079 subvolname
= self
._generate
_random
_subvolume
_name
()
5080 group
= self
._generate
_random
_group
_name
()
5081 snapshot
= self
._generate
_random
_snapshot
_name
()
5084 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5086 # create subvolume in group.
5087 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5089 # snapshot subvolume
5090 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5092 # set metadata for snapshot.
5095 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5097 # try to get value for nonexisting key
5098 # Expecting ENOENT exit status because key does not exist
5100 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
5101 except CommandFailedError
as e
:
5102 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5104 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
5106 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5107 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5108 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5110 # verify trash dir is clean.
5111 self
._wait
_for
_trash
_empty
()
5113 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self
):
5115 Get custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5117 subvolname
= self
._generate
_random
_subvolume
_name
()
5118 group
= self
._generate
_random
_group
_name
()
5119 snapshot
= self
._generate
_random
_snapshot
_name
()
5122 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5124 # create subvolume in group.
5125 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5127 # snapshot subvolume
5128 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5130 # try to get value for nonexisting key (as section does not exist)
5131 # Expecting ENOENT exit status because key does not exist
5133 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key", group
)
5134 except CommandFailedError
as e
:
5135 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5137 self
.fail("Expected ENOENT because section does not exist")
5139 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5140 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5141 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5143 # verify trash dir is clean.
5144 self
._wait
_for
_trash
_empty
()
5146 def test_subvolume_snapshot_metadata_update(self
):
5148 Update custom metadata for a specified key in subvolume snapshot metadata.
5150 subvolname
= self
._generate
_random
_subvolume
_name
()
5151 group
= self
._generate
_random
_group
_name
()
5152 snapshot
= self
._generate
_random
_snapshot
_name
()
5155 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5157 # create subvolume in group.
5158 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5160 # snapshot subvolume
5161 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5163 # set metadata for snapshot.
5166 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5168 # update metadata against key.
5169 new_value
= "new_value"
5170 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, new_value
, group
)
5172 # get metadata for specified key of snapshot.
5174 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5175 except CommandFailedError
:
5176 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5178 # remove '\n' from returned value.
5179 ret
= ret
.strip('\n')
5181 # match received value with expected value.
5182 self
.assertEqual(new_value
, ret
)
5184 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5185 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5186 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5188 # verify trash dir is clean.
5189 self
._wait
_for
_trash
_empty
()
5191 def test_subvolume_snapshot_metadata_list(self
):
5193 List custom metadata for subvolume snapshot.
5195 subvolname
= self
._generate
_random
_subvolume
_name
()
5196 group
= self
._generate
_random
_group
_name
()
5197 snapshot
= self
._generate
_random
_snapshot
_name
()
5200 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5202 # create subvolume in group.
5203 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5205 # snapshot subvolume
5206 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5208 # set metadata for subvolume.
5209 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
5211 for k
, v
in input_metadata_dict
.items():
5212 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, k
, v
, group
)
5216 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
5217 except CommandFailedError
:
5218 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5220 # compare output with expected output
5221 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
5223 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5224 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5225 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5227 # verify trash dir is clean.
5228 self
._wait
_for
_trash
_empty
()
5230 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self
):
5232 List custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5234 subvolname
= self
._generate
_random
_subvolume
_name
()
5235 group
= self
._generate
_random
_group
_name
()
5236 snapshot
= self
._generate
_random
_snapshot
_name
()
5239 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5241 # create subvolume in group.
5242 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5244 # snapshot subvolume
5245 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5249 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
5250 except CommandFailedError
:
5251 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5253 # compare output with expected output
5255 self
.assertDictEqual(ret_dict
, empty_dict
)
5257 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5258 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5259 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5261 # verify trash dir is clean.
5262 self
._wait
_for
_trash
_empty
()
5264 def test_subvolume_snapshot_metadata_remove(self
):
5266 Remove custom metadata for a specified key in subvolume snapshot metadata.
5268 subvolname
= self
._generate
_random
_subvolume
_name
()
5269 group
= self
._generate
_random
_group
_name
()
5270 snapshot
= self
._generate
_random
_snapshot
_name
()
5273 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5275 # create subvolume in group.
5276 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5278 # snapshot subvolume
5279 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5281 # set metadata for snapshot.
5284 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5286 # remove metadata against specified key.
5288 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
5289 except CommandFailedError
:
5290 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5292 # confirm key is removed by again fetching metadata
5294 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, key
, snapshot
, group
)
5295 except CommandFailedError
as e
:
5296 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5298 self
.fail("Expected ENOENT because key does not exist")
5300 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5301 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5302 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5304 # verify trash dir is clean.
5305 self
._wait
_for
_trash
_empty
()
5307 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self
):
5309 Remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5311 subvolname
= self
._generate
_random
_subvolume
_name
()
5312 group
= self
._generate
_random
_group
_name
()
5313 snapshot
= self
._generate
_random
_snapshot
_name
()
5316 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5318 # create subvolume in group.
5319 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5321 # snapshot subvolume
5322 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5324 # set metadata for snapshot.
5327 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5329 # try to remove value for nonexisting key
5330 # Expecting ENOENT exit status because key does not exist
5332 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
5333 except CommandFailedError
as e
:
5334 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5336 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
5338 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5339 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5340 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5342 # verify trash dir is clean.
5343 self
._wait
_for
_trash
_empty
()
5345 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self
):
5347 Remove custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5349 subvolname
= self
._generate
_random
_subvolume
_name
()
5350 group
= self
._generate
_random
_group
_name
()
5351 snapshot
= self
._generate
_random
_snapshot
_name
()
5354 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5356 # create subvolume in group.
5357 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5359 # snapshot subvolume
5360 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5362 # try to remove value for nonexisting key (as section does not exist)
5363 # Expecting ENOENT exit status because key does not exist
5365 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key", group
)
5366 except CommandFailedError
as e
:
5367 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5369 self
.fail("Expected ENOENT because section does not exist")
5371 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5372 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5373 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5375 # verify trash dir is clean.
5376 self
._wait
_for
_trash
_empty
()
5378 def test_subvolume_snapshot_metadata_remove_force(self
):
5380 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
5382 subvolname
= self
._generate
_random
_subvolume
_name
()
5383 group
= self
._generate
_random
_group
_name
()
5384 snapshot
= self
._generate
_random
_snapshot
_name
()
5387 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5389 # create subvolume in group.
5390 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5392 # snapshot subvolume
5393 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5395 # set metadata for snapshot.
5398 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5400 # remove metadata against specified key with --force option.
5402 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
5403 except CommandFailedError
:
5404 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5406 # confirm key is removed by again fetching metadata
5408 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5409 except CommandFailedError
as e
:
5410 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5412 self
.fail("Expected ENOENT because key does not exist")
5414 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5415 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5416 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5418 # verify trash dir is clean.
5419 self
._wait
_for
_trash
_empty
()
5421 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self
):
5423 Forcefully remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5425 subvolname
= self
._generate
_random
_subvolume
_name
()
5426 group
= self
._generate
_random
_group
_name
()
5427 snapshot
= self
._generate
_random
_snapshot
_name
()
5430 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5432 # create subvolume in group.
5433 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5435 # snapshot subvolume
5436 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5438 # set metadata for snapshot.
5441 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5443 # remove metadata against specified key.
5445 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
5446 except CommandFailedError
:
5447 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5449 # confirm key is removed by again fetching metadata
5451 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5452 except CommandFailedError
as e
:
5453 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5455 self
.fail("Expected ENOENT because key does not exist")
5457 # again remove metadata against already removed key with --force option.
5459 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
5460 except CommandFailedError
:
5461 self
.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
5463 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5464 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5465 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5467 # verify trash dir is clean.
5468 self
._wait
_for
_trash
_empty
()
5470 def test_subvolume_snapshot_metadata_after_snapshot_remove(self
):
5472 Verify metadata removal of subvolume snapshot after snapshot removal.
5474 subvolname
= self
._generate
_random
_subvolume
_name
()
5475 group
= self
._generate
_random
_group
_name
()
5476 snapshot
= self
._generate
_random
_snapshot
_name
()
5479 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5481 # create subvolume in group.
5482 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5484 # snapshot subvolume
5485 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5487 # set metadata for snapshot.
5490 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5492 # get value for specified key.
5493 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5495 # remove '\n' from returned value.
5496 ret
= ret
.strip('\n')
5498 # match received value with expected value.
5499 self
.assertEqual(value
, ret
)
5501 # remove subvolume snapshot.
5502 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5504 # try to get metadata after removing snapshot.
5505 # Expecting error ENOENT with error message of snapshot does not exist
5506 cmd_ret
= self
.mgr_cluster
.mon_manager
.run_cluster_cmd(
5507 args
=["fs", "subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
],
5508 check_status
=False, stdout
=StringIO(), stderr
=StringIO())
5509 self
.assertEqual(cmd_ret
.returncode
, errno
.ENOENT
, "Expecting ENOENT error")
5510 self
.assertIn(f
"snapshot '{snapshot}' does not exist", cmd_ret
.stderr
.getvalue(),
5511 f
"Expecting message: snapshot '{snapshot}' does not exist ")
5513 # confirm metadata is removed by searching section name in .meta file
5514 meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta")
5515 section_name
= "SNAP_METADATA_" + snapshot
5518 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5519 except CommandFailedError
as e
:
5520 self
.assertNotEqual(e
.exitstatus
, 0)
5522 self
.fail("Expected non-zero exist status because section should not exist")
5524 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5525 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5527 # verify trash dir is clean.
5528 self
._wait
_for
_trash
_empty
()
5530 def test_clean_stale_subvolume_snapshot_metadata(self
):
5532 Validate cleaning of stale subvolume snapshot metadata.
5534 subvolname
= self
._generate
_random
_subvolume
_name
()
5535 group
= self
._generate
_random
_group
_name
()
5536 snapshot
= self
._generate
_random
_snapshot
_name
()
5539 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5541 # create subvolume in group.
5542 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5544 # snapshot subvolume
5545 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5547 # set metadata for snapshot.
5551 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5552 except CommandFailedError
:
5553 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5555 # save the subvolume config file.
5556 meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta")
5557 tmp_meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta.stale_snap_section")
5558 self
.mount_a
.run_shell(['sudo', 'cp', '-p', meta_path
, tmp_meta_path
], omit_sudo
=False)
5560 # Delete snapshot, this would remove user snap metadata
5561 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5563 # Copy back saved subvolume config file. This would have stale snapshot metadata
5564 self
.mount_a
.run_shell(['sudo', 'cp', '-p', tmp_meta_path
, meta_path
], omit_sudo
=False)
5566 # Verify that it has stale snapshot metadata
5567 section_name
= "SNAP_METADATA_" + snapshot
5569 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5570 except CommandFailedError
:
5571 self
.fail("Expected grep cmd to succeed because stale snapshot metadata exist")
5573 # Do any subvolume operation to clean the stale snapshot metadata
5574 _
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, subvolname
, group
))
5576 # Verify that the stale snapshot metadata is cleaned
5578 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5579 except CommandFailedError
as e
:
5580 self
.assertNotEqual(e
.exitstatus
, 0)
5582 self
.fail("Expected non-zero exist status because stale snapshot metadata should not exist")
5584 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5585 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5587 # verify trash dir is clean.
5588 self
._wait
_for
_trash
_empty
()
5589 # Clean tmp config file
5590 self
.mount_a
.run_shell(['sudo', 'rm', '-f', tmp_meta_path
], omit_sudo
=False)
5593 class TestSubvolumeSnapshotClones(TestVolumesHelper
):
5594 """ Tests for FS subvolume snapshot clone operations."""
5595 def test_clone_subvolume_info(self
):
5596 # tests the 'fs subvolume info' command for a clone
5597 subvol_md
= ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
5598 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
5601 subvolume
= self
._generate
_random
_subvolume
_name
()
5602 snapshot
= self
._generate
_random
_snapshot
_name
()
5603 clone
= self
._generate
_random
_clone
_name
()
5606 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5609 self
._do
_subvolume
_io
(subvolume
, number_of_files
=1)
5611 # snapshot subvolume
5612 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5615 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5617 # check clone status
5618 self
._wait
_for
_clone
_to
_complete
(clone
)
5621 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5623 subvol_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
))
5624 if len(subvol_info
) == 0:
5625 raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
5626 for md
in subvol_md
:
5627 if md
not in subvol_info
.keys():
5628 raise RuntimeError("%s not present in the metadata of subvolume" % md
)
5629 if subvol_info
["type"] != "clone":
5630 raise RuntimeError("type should be set to clone")
5633 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5634 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5636 # verify trash dir is clean
5637 self
._wait
_for
_trash
_empty
()
5639 def test_subvolume_snapshot_info_without_snapshot_clone(self
):
5641 Verify subvolume snapshot info output without cloning snapshot.
5642 If no clone is performed then path /volumes/_index/clone/{track_id}
5645 subvolume
= self
._generate
_random
_subvolume
_name
()
5646 snapshot
= self
._generate
_random
_snapshot
_name
()
5649 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5651 # snapshot subvolume
5652 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5654 # list snapshot info
5655 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5657 # verify snapshot info
5658 self
.assertEqual(result
['has_pending_clones'], "no")
5659 self
.assertFalse('orphan_clones_count' in result
)
5660 self
.assertFalse('pending_clones' in result
)
5662 # remove snapshot, subvolume, clone
5663 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5664 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5666 # verify trash dir is clean
5667 self
._wait
_for
_trash
_empty
()
5669 def test_subvolume_snapshot_info_if_no_clone_pending(self
):
5671 Verify subvolume snapshot info output if no clone is in pending state.
5673 subvolume
= self
._generate
_random
_subvolume
_name
()
5674 snapshot
= self
._generate
_random
_snapshot
_name
()
5675 clone_list
= [f
'clone_{i}' for i
in range(3)]
5678 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5680 # snapshot subvolume
5681 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5684 for clone
in clone_list
:
5685 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5687 # check clones status
5688 for clone
in clone_list
:
5689 self
._wait
_for
_clone
_to
_complete
(clone
)
5691 # list snapshot info
5692 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5694 # verify snapshot info
5695 self
.assertEqual(result
['has_pending_clones'], "no")
5696 self
.assertFalse('orphan_clones_count' in result
)
5697 self
.assertFalse('pending_clones' in result
)
5699 # remove snapshot, subvolume, clone
5700 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5701 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5702 for clone
in clone_list
:
5703 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5705 # verify trash dir is clean
5706 self
._wait
_for
_trash
_empty
()
5708 def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self
):
5710 Verify subvolume snapshot info output if clones are in pending state.
5711 Clones are not specified for particular target_group. Hence target_group
5712 should not be in the output as we don't show _nogroup (default group)
5714 subvolume
= self
._generate
_random
_subvolume
_name
()
5715 snapshot
= self
._generate
_random
_snapshot
_name
()
5716 clone_list
= [f
'clone_{i}' for i
in range(3)]
5719 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5721 # snapshot subvolume
5722 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5724 # insert delay at the beginning of snapshot clone
5725 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5728 for clone
in clone_list
:
5729 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5731 # list snapshot info
5732 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5734 # verify snapshot info
5735 expected_clone_list
= []
5736 for clone
in clone_list
:
5737 expected_clone_list
.append({"name": clone
})
5738 self
.assertEqual(result
['has_pending_clones'], "yes")
5739 self
.assertFalse('orphan_clones_count' in result
)
5740 self
.assertListEqual(result
['pending_clones'], expected_clone_list
)
5741 self
.assertEqual(len(result
['pending_clones']), 3)
5743 # check clones status
5744 for clone
in clone_list
:
5745 self
._wait
_for
_clone
_to
_complete
(clone
)
5747 # remove snapshot, subvolume, clone
5748 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5749 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5750 for clone
in clone_list
:
5751 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5753 # verify trash dir is clean
5754 self
._wait
_for
_trash
_empty
()
5756 def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self
):
5758 Verify subvolume snapshot info output if clones are in pending state.
5759 Clones are not specified for target_group.
5761 subvolume
= self
._generate
_random
_subvolume
_name
()
5762 snapshot
= self
._generate
_random
_snapshot
_name
()
5763 clone
= self
._generate
_random
_clone
_name
()
5764 group
= self
._generate
_random
_group
_name
()
5765 target_group
= self
._generate
_random
_group
_name
()
5768 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5769 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, target_group
)
5772 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, group
, "--mode=777")
5774 # snapshot subvolume
5775 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
5777 # insert delay at the beginning of snapshot clone
5778 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5781 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
,
5782 "--group_name", group
, "--target_group_name", target_group
)
5784 # list snapshot info
5785 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
, "--group_name", group
))
5787 # verify snapshot info
5788 expected_clone_list
= [{"name": clone
, "target_group": target_group
}]
5789 self
.assertEqual(result
['has_pending_clones'], "yes")
5790 self
.assertFalse('orphan_clones_count' in result
)
5791 self
.assertListEqual(result
['pending_clones'], expected_clone_list
)
5792 self
.assertEqual(len(result
['pending_clones']), 1)
5794 # check clone status
5795 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=target_group
)
5798 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
5801 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
5802 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, target_group
)
5805 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5806 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, target_group
)
5808 # verify trash dir is clean
5809 self
._wait
_for
_trash
_empty
()
5811 def test_subvolume_snapshot_info_if_orphan_clone(self
):
5813 Verify subvolume snapshot info output if orphan clones exists.
5814 Orphan clones should not list under pending clones.
5815 orphan_clones_count should display correct count of orphan clones'
5817 subvolume
= self
._generate
_random
_subvolume
_name
()
5818 snapshot
= self
._generate
_random
_snapshot
_name
()
5819 clone_list
= [f
'clone_{i}' for i
in range(3)]
5822 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5824 # snapshot subvolume
5825 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5827 # insert delay at the beginning of snapshot clone
5828 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 15)
5831 for clone
in clone_list
:
5832 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5834 # remove track file for third clone to make it orphan
5835 meta_path
= os
.path
.join(".", "volumes", "_nogroup", subvolume
, ".meta")
5836 pending_clones_result
= self
.mount_a
.run_shell(['sudo', 'grep', 'clone snaps', '-A3', meta_path
], omit_sudo
=False, stdout
=StringIO(), stderr
=StringIO())
5837 third_clone_track_id
= pending_clones_result
.stdout
.getvalue().splitlines()[3].split(" = ")[0]
5838 third_clone_track_path
= os
.path
.join(".", "volumes", "_index", "clone", third_clone_track_id
)
5839 self
.mount_a
.run_shell(f
"sudo rm -f {third_clone_track_path}", omit_sudo
=False)
5841 # list snapshot info
5842 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5844 # verify snapshot info
5845 expected_clone_list
= []
5846 for i
in range(len(clone_list
)-1):
5847 expected_clone_list
.append({"name": clone_list
[i
]})
5848 self
.assertEqual(result
['has_pending_clones'], "yes")
5849 self
.assertEqual(result
['orphan_clones_count'], 1)
5850 self
.assertListEqual(result
['pending_clones'], expected_clone_list
)
5851 self
.assertEqual(len(result
['pending_clones']), 2)
5853 # check clones status
5854 for i
in range(len(clone_list
)-1):
5855 self
._wait
_for
_clone
_to
_complete
(clone_list
[i
])
5857 # list snapshot info after cloning completion
5858 res
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5860 # verify snapshot info (has_pending_clones should be no)
5861 self
.assertEqual(res
['has_pending_clones'], "no")
5863 def test_non_clone_status(self
):
5864 subvolume
= self
._generate
_random
_subvolume
_name
()
5867 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
5870 self
._fs
_cmd
("clone", "status", self
.volname
, subvolume
)
5871 except CommandFailedError
as ce
:
5872 if ce
.exitstatus
!= errno
.ENOTSUP
:
5873 raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
5875 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
5878 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5880 # verify trash dir is clean
5881 self
._wait
_for
_trash
_empty
()
5883 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self
):
5884 subvolume
= self
._generate
_random
_subvolume
_name
()
5885 snapshot
= self
._generate
_random
_snapshot
_name
()
5886 clone
= self
._generate
_random
_clone
_name
()
5887 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*12
5889 # create subvolume, in an isolated namespace with a specified size
5890 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--namespace-isolated", "--size", str(osize
), "--mode=777")
5893 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
5895 # snapshot subvolume
5896 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5898 # create a pool different from current subvolume pool
5899 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
5900 default_pool
= self
.mount_a
.getfattr(subvol_path
, "ceph.dir.layout.pool")
5901 new_pool
= "new_pool"
5902 self
.assertNotEqual(default_pool
, new_pool
)
5903 self
.fs
.add_data_pool(new_pool
)
5905 # update source subvolume pool
5906 self
._do
_subvolume
_pool
_and
_namespace
_update
(subvolume
, pool
=new_pool
, pool_namespace
="")
5908 # schedule a clone, with NO --pool specification
5909 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5911 # check clone status
5912 self
._wait
_for
_clone
_to
_complete
(clone
)
5915 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5918 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5921 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5922 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5924 # verify trash dir is clean
5925 self
._wait
_for
_trash
_empty
()
5927 def test_subvolume_clone_inherit_quota_attrs(self
):
5928 subvolume
= self
._generate
_random
_subvolume
_name
()
5929 snapshot
= self
._generate
_random
_snapshot
_name
()
5930 clone
= self
._generate
_random
_clone
_name
()
5931 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*12
5933 # create subvolume with a specified size
5934 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777", "--size", str(osize
))
5937 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
5939 # get subvolume path
5940 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
5942 # set quota on number of files
5943 self
.mount_a
.setfattr(subvolpath
, 'ceph.quota.max_files', "20", sudo
=True)
5945 # snapshot subvolume
5946 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5949 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5951 # check clone status
5952 self
._wait
_for
_clone
_to
_complete
(clone
)
5955 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5957 # get subvolume path
5958 clonepath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
5960 # verify quota max_files is inherited from source snapshot
5961 subvol_quota
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_files")
5962 clone_quota
= self
.mount_a
.getfattr(clonepath
, "ceph.quota.max_files")
5963 self
.assertEqual(subvol_quota
, clone_quota
)
5966 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5969 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5970 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5972 # verify trash dir is clean
5973 self
._wait
_for
_trash
_empty
()
5975 def test_subvolume_clone_in_progress_getpath(self
):
5976 subvolume
= self
._generate
_random
_subvolume
_name
()
5977 snapshot
= self
._generate
_random
_snapshot
_name
()
5978 clone
= self
._generate
_random
_clone
_name
()
5981 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5984 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
5986 # snapshot subvolume
5987 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5989 # Insert delay at the beginning of snapshot clone
5990 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5993 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5995 # clone should not be accessible right now
5997 self
._get
_subvolume
_path
(self
.volname
, clone
)
5998 except CommandFailedError
as ce
:
5999 if ce
.exitstatus
!= errno
.EAGAIN
:
6000 raise RuntimeError("invalid error code when fetching path of an pending clone")
6002 raise RuntimeError("expected fetching path of an pending clone to fail")
6004 # check clone status
6005 self
._wait
_for
_clone
_to
_complete
(clone
)
6007 # clone should be accessible now
6008 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
6009 self
.assertNotEqual(subvolpath
, None)
6012 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6015 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6018 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6019 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6021 # verify trash dir is clean
6022 self
._wait
_for
_trash
_empty
()
6024 def test_subvolume_clone_in_progress_snapshot_rm(self
):
6025 subvolume
= self
._generate
_random
_subvolume
_name
()
6026 snapshot
= self
._generate
_random
_snapshot
_name
()
6027 clone
= self
._generate
_random
_clone
_name
()
6030 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6033 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6035 # snapshot subvolume
6036 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6038 # Insert delay at the beginning of snapshot clone
6039 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6042 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6044 # snapshot should not be deletable now
6046 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6047 except CommandFailedError
as ce
:
6048 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, msg
="invalid error code when removing source snapshot of a clone")
6050 self
.fail("expected removing source snapshot of a clone to fail")
6052 # check clone status
6053 self
._wait
_for
_clone
_to
_complete
(clone
)
6055 # clone should be accessible now
6056 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
6057 self
.assertNotEqual(subvolpath
, None)
6060 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6063 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6066 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6067 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6069 # verify trash dir is clean
6070 self
._wait
_for
_trash
_empty
()
6072 def test_subvolume_clone_in_progress_source(self
):
6073 subvolume
= self
._generate
_random
_subvolume
_name
()
6074 snapshot
= self
._generate
_random
_snapshot
_name
()
6075 clone
= self
._generate
_random
_clone
_name
()
6078 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6081 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6083 # snapshot subvolume
6084 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6086 # Insert delay at the beginning of snapshot clone
6087 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6090 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6092 # verify clone source
6093 result
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, clone
))
6094 source
= result
['status']['source']
6095 self
.assertEqual(source
['volume'], self
.volname
)
6096 self
.assertEqual(source
['subvolume'], subvolume
)
6097 self
.assertEqual(source
.get('group', None), None)
6098 self
.assertEqual(source
['snapshot'], snapshot
)
6100 # check clone status
6101 self
._wait
_for
_clone
_to
_complete
(clone
)
6103 # clone should be accessible now
6104 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
6105 self
.assertNotEqual(subvolpath
, None)
6108 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6111 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6114 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6115 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6117 # verify trash dir is clean
6118 self
._wait
_for
_trash
_empty
()
6120 def test_subvolume_clone_retain_snapshot_with_snapshots(self
):
6122 retain snapshots of a cloned subvolume and check disallowed operations
6124 subvolume
= self
._generate
_random
_subvolume
_name
()
6125 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
6126 clone
= self
._generate
_random
_clone
_name
()
6129 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6131 # store path for clone verification
6132 subvol1_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6135 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6137 # snapshot subvolume
6138 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
6140 # remove with snapshot retention
6141 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6143 # clone retained subvolume snapshot
6144 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot1
, clone
)
6146 # check clone status
6147 self
._wait
_for
_clone
_to
_complete
(clone
)
6150 self
._verify
_clone
(subvolume
, snapshot1
, clone
, subvol_path
=subvol1_path
)
6152 # create a snapshot on the clone
6153 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone
, snapshot2
)
6156 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--retain-snapshots")
6159 clonesnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, clone
))
6160 self
.assertEqual(len(clonesnapshotls
), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
6161 " created subvolume snapshots")
6162 snapshotnames
= [snapshot
['name'] for snapshot
in clonesnapshotls
]
6163 for snap
in [snapshot2
]:
6164 self
.assertIn(snap
, snapshotnames
, "Missing snapshot '{0}' in snapshot list".format(snap
))
6166 ## check disallowed operations on retained clone
6169 self
._fs
_cmd
("clone", "status", self
.volname
, clone
)
6170 except CommandFailedError
as ce
:
6171 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on clone status of clone with retained snapshots")
6173 self
.fail("expected clone status of clone with retained snapshots to fail")
6177 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6178 except CommandFailedError
as ce
:
6179 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on clone cancel of clone with retained snapshots")
6181 self
.fail("expected clone cancel of clone with retained snapshots to fail")
6183 # remove snapshots (removes subvolumes as all are in retained state)
6184 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
6185 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone
, snapshot2
)
6187 # verify list subvolumes returns an empty list
6188 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6189 self
.assertEqual(len(subvolumels
), 0)
6191 # verify trash dir is clean
6192 self
._wait
_for
_trash
_empty
()
6194 def test_subvolume_retain_snapshot_clone(self
):
6196 clone a snapshot from a snapshot retained subvolume
6198 subvolume
= self
._generate
_random
_subvolume
_name
()
6199 snapshot
= self
._generate
_random
_snapshot
_name
()
6200 clone
= self
._generate
_random
_clone
_name
()
6203 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6205 # store path for clone verification
6206 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6209 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6211 # snapshot subvolume
6212 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6214 # remove with snapshot retention
6215 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6217 # clone retained subvolume snapshot
6218 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6220 # check clone status
6221 self
._wait
_for
_clone
_to
_complete
(clone
)
6224 self
._verify
_clone
(subvolume
, snapshot
, clone
, subvol_path
=subvol_path
)
6226 # remove snapshots (removes retained volume)
6227 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6230 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6232 # verify list subvolumes returns an empty list
6233 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6234 self
.assertEqual(len(subvolumels
), 0)
6236 # verify trash dir is clean
6237 self
._wait
_for
_trash
_empty
()
6239 def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self
):
6241 clone a subvolume from recreated subvolume's latest snapshot
6243 subvolume
= self
._generate
_random
_subvolume
_name
()
6244 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
6245 clone
= self
._generate
_random
_clone
_name
(1)
6248 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6251 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6253 # snapshot subvolume
6254 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
6256 # remove with snapshot retention
6257 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6259 # recreate subvolume
6260 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6262 # get and store path for clone verification
6263 subvol2_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6266 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6268 # snapshot newer subvolume
6269 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot2
)
6271 # remove with snapshot retention
6272 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6274 # clone retained subvolume's newer snapshot
6275 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot2
, clone
)
6277 # check clone status
6278 self
._wait
_for
_clone
_to
_complete
(clone
)
6281 self
._verify
_clone
(subvolume
, snapshot2
, clone
, subvol_path
=subvol2_path
)
6284 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
6285 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot2
)
6288 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6290 # verify list subvolumes returns an empty list
6291 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6292 self
.assertEqual(len(subvolumels
), 0)
6294 # verify trash dir is clean
6295 self
._wait
_for
_trash
_empty
()
6297 def test_subvolume_retain_snapshot_recreate(self
):
6299 recreate a subvolume from one of its retained snapshots
6301 subvolume
= self
._generate
_random
_subvolume
_name
()
6302 snapshot
= self
._generate
_random
_snapshot
_name
()
6305 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6307 # store path for clone verification
6308 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6311 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6313 # snapshot subvolume
6314 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6316 # remove with snapshot retention
6317 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6319 # recreate retained subvolume using its own snapshot to clone
6320 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, subvolume
)
6322 # check clone status
6323 self
._wait
_for
_clone
_to
_complete
(subvolume
)
6326 self
._verify
_clone
(subvolume
, snapshot
, subvolume
, subvol_path
=subvol_path
)
6329 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6332 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6334 # verify list subvolumes returns an empty list
6335 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6336 self
.assertEqual(len(subvolumels
), 0)
6338 # verify trash dir is clean
6339 self
._wait
_for
_trash
_empty
()
6341 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self
):
6343 ensure retained clone recreate fails if its trash is not yet purged
6345 subvolume
= self
._generate
_random
_subvolume
_name
()
6346 snapshot
= self
._generate
_random
_snapshot
_name
()
6347 clone
= self
._generate
_random
_clone
_name
()
6350 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
6352 # snapshot subvolume
6353 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6355 # clone subvolume snapshot
6356 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6358 # check clone status
6359 self
._wait
_for
_clone
_to
_complete
(clone
)
6362 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone
, snapshot
)
6364 # remove clone with snapshot retention
6365 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--retain-snapshots")
6367 # fake a trash entry
6368 self
._update
_fake
_trash
(clone
)
6370 # clone subvolume snapshot (recreate)
6372 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6373 except CommandFailedError
as ce
:
6374 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of clone with purge pending")
6376 self
.fail("expected recreate of clone with purge pending to fail")
6378 # clear fake trash entry
6379 self
._update
_fake
_trash
(clone
, create
=False)
6381 # recreate subvolume
6382 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6384 # check clone status
6385 self
._wait
_for
_clone
_to
_complete
(clone
)
6388 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6389 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone
, snapshot
)
6392 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6393 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6395 # verify trash dir is clean
6396 self
._wait
_for
_trash
_empty
()
6398 def test_subvolume_snapshot_attr_clone(self
):
6399 subvolume
= self
._generate
_random
_subvolume
_name
()
6400 snapshot
= self
._generate
_random
_snapshot
_name
()
6401 clone
= self
._generate
_random
_clone
_name
()
6404 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6407 self
._do
_subvolume
_io
_mixed
(subvolume
)
6409 # snapshot subvolume
6410 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6413 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6415 # check clone status
6416 self
._wait
_for
_clone
_to
_complete
(clone
)
6419 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6422 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6425 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6426 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6428 # verify trash dir is clean
6429 self
._wait
_for
_trash
_empty
()
6431 def test_clone_failure_status_pending_in_progress_complete(self
):
6433 ensure failure status is not shown when clone is not in failed/cancelled state
6435 subvolume
= self
._generate
_random
_subvolume
_name
()
6436 snapshot
= self
._generate
_random
_snapshot
_name
()
6437 clone1
= self
._generate
_random
_clone
_name
()
6440 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6443 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6445 # snapshot subvolume
6446 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6448 # Insert delay at the beginning of snapshot clone
6449 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6452 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6454 # pending clone shouldn't show failure status
6455 clone1_result
= self
._get
_clone
_status
(clone1
)
6457 clone1_result
["status"]["failure"]["errno"]
6458 except KeyError as e
:
6459 self
.assertEqual(str(e
), "'failure'")
6461 self
.fail("clone status shouldn't show failure for pending clone")
6463 # check clone1 to be in-progress
6464 self
._wait
_for
_clone
_to
_be
_in
_progress
(clone1
)
6466 # in-progress clone1 shouldn't show failure status
6467 clone1_result
= self
._get
_clone
_status
(clone1
)
6469 clone1_result
["status"]["failure"]["errno"]
6470 except KeyError as e
:
6471 self
.assertEqual(str(e
), "'failure'")
6473 self
.fail("clone status shouldn't show failure for in-progress clone")
6475 # wait for clone1 to complete
6476 self
._wait
_for
_clone
_to
_complete
(clone1
)
6478 # complete clone1 shouldn't show failure status
6479 clone1_result
= self
._get
_clone
_status
(clone1
)
6481 clone1_result
["status"]["failure"]["errno"]
6482 except KeyError as e
:
6483 self
.assertEqual(str(e
), "'failure'")
6485 self
.fail("clone status shouldn't show failure for complete clone")
6488 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6491 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6492 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6494 # verify trash dir is clean
6495 self
._wait
_for
_trash
_empty
()
6497 def test_clone_failure_status_failed(self
):
6499 ensure failure status is shown when clone is in failed state and validate the reason
6501 subvolume
= self
._generate
_random
_subvolume
_name
()
6502 snapshot
= self
._generate
_random
_snapshot
_name
()
6503 clone1
= self
._generate
_random
_clone
_name
()
6506 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6509 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6511 # snapshot subvolume
6512 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6514 # Insert delay at the beginning of snapshot clone
6515 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6518 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6520 # remove snapshot from backend to force the clone failure.
6521 snappath
= os
.path
.join(".", "volumes", "_nogroup", subvolume
, ".snap", snapshot
)
6522 self
.mount_a
.run_shell(['sudo', 'rmdir', snappath
], omit_sudo
=False)
6524 # wait for clone1 to fail.
6525 self
._wait
_for
_clone
_to
_fail
(clone1
)
6527 # check clone1 status
6528 clone1_result
= self
._get
_clone
_status
(clone1
)
6529 self
.assertEqual(clone1_result
["status"]["state"], "failed")
6530 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "2")
6531 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot
))
6533 # clone removal should succeed after failure, remove clone1
6534 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6537 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6539 # verify trash dir is clean
6540 self
._wait
_for
_trash
_empty
()
6542 def test_clone_failure_status_pending_cancelled(self
):
6544 ensure failure status is shown when clone is cancelled during pending state and validate the reason
6546 subvolume
= self
._generate
_random
_subvolume
_name
()
6547 snapshot
= self
._generate
_random
_snapshot
_name
()
6548 clone1
= self
._generate
_random
_clone
_name
()
6551 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6554 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6556 # snapshot subvolume
6557 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6559 # Insert delay at the beginning of snapshot clone
6560 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6563 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6565 # cancel pending clone1
6566 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone1
)
6568 # check clone1 status
6569 clone1_result
= self
._get
_clone
_status
(clone1
)
6570 self
.assertEqual(clone1_result
["status"]["state"], "canceled")
6571 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "4")
6572 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "user interrupted clone operation")
6574 # clone removal should succeed with force after cancelled, remove clone1
6575 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6578 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6581 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6583 # verify trash dir is clean
6584 self
._wait
_for
_trash
_empty
()
6586 def test_clone_failure_status_in_progress_cancelled(self
):
6588 ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
6590 subvolume
= self
._generate
_random
_subvolume
_name
()
6591 snapshot
= self
._generate
_random
_snapshot
_name
()
6592 clone1
= self
._generate
_random
_clone
_name
()
6595 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6598 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6600 # snapshot subvolume
6601 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6603 # Insert delay at the beginning of snapshot clone
6604 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6607 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6609 # wait for clone1 to be in-progress
6610 self
._wait
_for
_clone
_to
_be
_in
_progress
(clone1
)
6612 # cancel in-progess clone1
6613 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone1
)
6615 # check clone1 status
6616 clone1_result
= self
._get
_clone
_status
(clone1
)
6617 self
.assertEqual(clone1_result
["status"]["state"], "canceled")
6618 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "4")
6619 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "user interrupted clone operation")
6621 # clone removal should succeed with force after cancelled, remove clone1
6622 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6625 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6628 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6630 # verify trash dir is clean
6631 self
._wait
_for
_trash
_empty
()
6633 def test_subvolume_snapshot_clone(self
):
6634 subvolume
= self
._generate
_random
_subvolume
_name
()
6635 snapshot
= self
._generate
_random
_snapshot
_name
()
6636 clone
= self
._generate
_random
_clone
_name
()
6639 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6642 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6644 # snapshot subvolume
6645 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6648 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6650 # check clone status
6651 self
._wait
_for
_clone
_to
_complete
(clone
)
6654 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6657 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6660 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6661 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6663 # verify trash dir is clean
6664 self
._wait
_for
_trash
_empty
()
6666 def test_subvolume_snapshot_clone_quota_exceeded(self
):
6667 subvolume
= self
._generate
_random
_subvolume
_name
()
6668 snapshot
= self
._generate
_random
_snapshot
_name
()
6669 clone
= self
._generate
_random
_clone
_name
()
6671 # create subvolume with 20MB quota
6672 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
6673 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
,"--mode=777", "--size", str(osize
))
6675 # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
6677 self
._do
_subvolume
_io
(subvolume
, number_of_files
=50)
6678 except CommandFailedError
:
6679 # ignore quota enforcement error.
6682 # snapshot subvolume
6683 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6686 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6688 # check clone status
6689 self
._wait
_for
_clone
_to
_complete
(clone
)
6692 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6695 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6698 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6699 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6701 # verify trash dir is clean
6702 self
._wait
_for
_trash
_empty
()
6704 def test_subvolume_snapshot_in_complete_clone_rm(self
):
6706 Validates the removal of clone when it is not in 'complete|cancelled|failed' state.
6707 The forceful removl of subvolume clone succeeds only if it's in any of the
6708 'complete|cancelled|failed' states. It fails with EAGAIN in any other states.
6711 subvolume
= self
._generate
_random
_subvolume
_name
()
6712 snapshot
= self
._generate
_random
_snapshot
_name
()
6713 clone
= self
._generate
_random
_clone
_name
()
6716 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6719 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6721 # snapshot subvolume
6722 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6724 # Insert delay at the beginning of snapshot clone
6725 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6728 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6730 # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
6732 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6733 except CommandFailedError
as ce
:
6734 if ce
.exitstatus
!= errno
.EAGAIN
:
6735 raise RuntimeError("invalid error code when trying to remove failed clone")
6737 raise RuntimeError("expected error when removing a failed clone")
6739 # cancel on-going clone
6740 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6742 # verify canceled state
6743 self
._check
_clone
_canceled
(clone
)
6745 # clone removal should succeed after cancel
6746 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6749 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6752 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6754 # verify trash dir is clean
6755 self
._wait
_for
_trash
_empty
()
6757 def test_subvolume_snapshot_clone_retain_suid_guid(self
):
6758 subvolume
= self
._generate
_random
_subvolume
_name
()
6759 snapshot
= self
._generate
_random
_snapshot
_name
()
6760 clone
= self
._generate
_random
_clone
_name
()
6763 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6765 # Create a file with suid, guid bits set along with executable bit.
6766 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
6768 subvolpath
= self
._fs
_cmd
(*args
)
6769 self
.assertNotEqual(subvolpath
, None)
6770 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
6772 file_path
= subvolpath
6773 file_path
= os
.path
.join(subvolpath
, "test_suid_file")
6774 self
.mount_a
.run_shell(["touch", file_path
])
6775 self
.mount_a
.run_shell(["chmod", "u+sx,g+sx", file_path
])
6777 # snapshot subvolume
6778 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6781 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6783 # check clone status
6784 self
._wait
_for
_clone
_to
_complete
(clone
)
6787 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6790 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6793 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6794 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6796 # verify trash dir is clean
6797 self
._wait
_for
_trash
_empty
()
6799 def test_subvolume_snapshot_clone_and_reclone(self
):
6800 subvolume
= self
._generate
_random
_subvolume
_name
()
6801 snapshot
= self
._generate
_random
_snapshot
_name
()
6802 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
6805 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6808 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
6810 # snapshot subvolume
6811 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6814 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6816 # check clone status
6817 self
._wait
_for
_clone
_to
_complete
(clone1
)
6820 self
._verify
_clone
(subvolume
, snapshot
, clone1
)
6823 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6825 # now the clone is just like a normal subvolume -- snapshot the clone and fork
6826 # another clone. before that do some IO so it's can be differentiated.
6827 self
._do
_subvolume
_io
(clone1
, create_dir
="data", number_of_files
=32)
6829 # snapshot clone -- use same snap name
6830 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone1
, snapshot
)
6833 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, clone1
, snapshot
, clone2
)
6835 # check clone status
6836 self
._wait
_for
_clone
_to
_complete
(clone2
)
6839 self
._verify
_clone
(clone1
, snapshot
, clone2
)
6842 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone1
, snapshot
)
6845 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6846 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6847 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
6849 # verify trash dir is clean
6850 self
._wait
_for
_trash
_empty
()
6852 def test_subvolume_snapshot_clone_cancel_in_progress(self
):
6853 subvolume
= self
._generate
_random
_subvolume
_name
()
6854 snapshot
= self
._generate
_random
_snapshot
_name
()
6855 clone
= self
._generate
_random
_clone
_name
()
6858 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6861 self
._do
_subvolume
_io
(subvolume
, number_of_files
=128)
6863 # snapshot subvolume
6864 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6866 # Insert delay at the beginning of snapshot clone
6867 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6870 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6872 # cancel on-going clone
6873 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6875 # verify canceled state
6876 self
._check
_clone
_canceled
(clone
)
6879 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6882 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6883 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6885 # verify trash dir is clean
6886 self
._wait
_for
_trash
_empty
()
6888 def test_subvolume_snapshot_clone_cancel_pending(self
):
6890 this test is a bit more involved compared to canceling an in-progress clone.
6891 we'd need to ensure that a to-be canceled clone has still not been picked up
6892 by cloner threads. exploit the fact that clones are picked up in an FCFS
6893 fashion and there are four (4) cloner threads by default. When the number of
6894 cloner threads increase, this test _may_ start tripping -- so, the number of
6895 clone operations would need to be jacked up.
6897 # default number of clone threads
6899 # good enough for 4 threads
6901 # yeh, 1gig -- we need the clone to run for sometime
6904 subvolume
= self
._generate
_random
_subvolume
_name
()
6905 snapshot
= self
._generate
_random
_snapshot
_name
()
6906 clones
= self
._generate
_random
_clone
_name
(NR_CLONES
)
6909 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6912 self
._do
_subvolume
_io
(subvolume
, number_of_files
=4, file_size
=FILE_SIZE_MB
)
6914 # snapshot subvolume
6915 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6918 for clone
in clones
:
6919 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6921 to_wait
= clones
[0:NR_THREADS
]
6922 to_cancel
= clones
[NR_THREADS
:]
6924 # cancel pending clones and verify
6925 for clone
in to_cancel
:
6926 status
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, clone
))
6927 self
.assertEqual(status
["status"]["state"], "pending")
6928 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6929 self
._check
_clone
_canceled
(clone
)
6931 # let's cancel on-going clones. handle the case where some of the clones
6933 for clone
in list(to_wait
):
6935 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6936 to_cancel
.append(clone
)
6937 to_wait
.remove(clone
)
6938 except CommandFailedError
as ce
:
6939 if ce
.exitstatus
!= errno
.EINVAL
:
6940 raise RuntimeError("invalid error code when cancelling on-going clone")
6943 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6946 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6947 for clone
in to_wait
:
6948 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6949 for clone
in to_cancel
:
6950 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6952 # verify trash dir is clean
6953 self
._wait
_for
_trash
_empty
()
6955 def test_subvolume_snapshot_clone_different_groups(self
):
6956 subvolume
= self
._generate
_random
_subvolume
_name
()
6957 snapshot
= self
._generate
_random
_snapshot
_name
()
6958 clone
= self
._generate
_random
_clone
_name
()
6959 s_group
, c_group
= self
._generate
_random
_group
_name
(2)
6962 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, s_group
)
6963 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, c_group
)
6966 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, s_group
, "--mode=777")
6969 self
._do
_subvolume
_io
(subvolume
, subvolume_group
=s_group
, number_of_files
=32)
6971 # snapshot subvolume
6972 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, s_group
)
6975 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
,
6976 '--group_name', s_group
, '--target_group_name', c_group
)
6978 # check clone status
6979 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=c_group
)
6982 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_group
=s_group
, clone_group
=c_group
)
6985 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, s_group
)
6988 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, s_group
)
6989 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, c_group
)
6992 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, s_group
)
6993 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, c_group
)
6995 # verify trash dir is clean
6996 self
._wait
_for
_trash
_empty
()
6998 def test_subvolume_snapshot_clone_fail_with_remove(self
):
6999 subvolume
= self
._generate
_random
_subvolume
_name
()
7000 snapshot
= self
._generate
_random
_snapshot
_name
()
7001 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
7003 pool_capacity
= 32 * 1024 * 1024
7004 # number of files required to fill up 99% of the pool
7005 nr_files
= int((pool_capacity
* 0.99) / (TestVolumes
.DEFAULT_FILE_SIZE
* 1024 * 1024))
7008 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7011 self
._do
_subvolume
_io
(subvolume
, number_of_files
=nr_files
)
7013 # snapshot subvolume
7014 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7017 new_pool
= "new_pool"
7018 self
.fs
.add_data_pool(new_pool
)
7020 self
.fs
.mon_manager
.raw_cluster_cmd("osd", "pool", "set-quota", new_pool
,
7021 "max_bytes", "{0}".format(pool_capacity
// 4))
7024 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
, "--pool_layout", new_pool
)
7026 # check clone status -- this should dramatically overshoot the pool quota
7027 self
._wait
_for
_clone
_to
_complete
(clone1
)
7030 self
._verify
_clone
(subvolume
, snapshot
, clone1
, clone_pool
=new_pool
)
7032 # wait a bit so that subsequent I/O will give pool full error
7036 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone2
, "--pool_layout", new_pool
)
7038 # check clone status
7039 self
._wait
_for
_clone
_to
_fail
(clone2
)
7042 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7045 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7046 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
7048 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
7049 except CommandFailedError
as ce
:
7050 if ce
.exitstatus
!= errno
.EAGAIN
:
7051 raise RuntimeError("invalid error code when trying to remove failed clone")
7053 raise RuntimeError("expected error when removing a failed clone")
7055 # ... and with force, failed clone can be removed
7056 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
, "--force")
7058 # verify trash dir is clean
7059 self
._wait
_for
_trash
_empty
()
7061 def test_subvolume_snapshot_clone_on_existing_subvolumes(self
):
7062 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
7063 snapshot
= self
._generate
_random
_snapshot
_name
()
7064 clone
= self
._generate
_random
_clone
_name
()
7067 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--mode=777")
7068 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--mode=777")
7071 self
._do
_subvolume
_io
(subvolume1
, number_of_files
=32)
7073 # snapshot subvolume
7074 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume1
, snapshot
)
7076 # schedule a clone with target as subvolume2
7078 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, subvolume2
)
7079 except CommandFailedError
as ce
:
7080 if ce
.exitstatus
!= errno
.EEXIST
:
7081 raise RuntimeError("invalid error code when cloning to existing subvolume")
7083 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
7085 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, clone
)
7087 # schedule a clone with target as clone
7089 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, clone
)
7090 except CommandFailedError
as ce
:
7091 if ce
.exitstatus
!= errno
.EEXIST
:
7092 raise RuntimeError("invalid error code when cloning to existing clone")
7094 raise RuntimeError("expected cloning to fail if the target is an existing clone")
7096 # check clone status
7097 self
._wait
_for
_clone
_to
_complete
(clone
)
7100 self
._verify
_clone
(subvolume1
, snapshot
, clone
)
7103 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume1
, snapshot
)
7106 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
7107 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
)
7108 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7110 # verify trash dir is clean
7111 self
._wait
_for
_trash
_empty
()
7113 def test_subvolume_snapshot_clone_pool_layout(self
):
7114 subvolume
= self
._generate
_random
_subvolume
_name
()
7115 snapshot
= self
._generate
_random
_snapshot
_name
()
7116 clone
= self
._generate
_random
_clone
_name
()
7119 new_pool
= "new_pool"
7120 newid
= self
.fs
.add_data_pool(new_pool
)
7123 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7126 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7128 # snapshot subvolume
7129 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7132 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, "--pool_layout", new_pool
)
7134 # check clone status
7135 self
._wait
_for
_clone
_to
_complete
(clone
)
7138 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_pool
=new_pool
)
7141 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7143 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, clone
)
7144 desired_pool
= self
.mount_a
.getfattr(subvol_path
, "ceph.dir.layout.pool")
7146 self
.assertEqual(desired_pool
, new_pool
)
7147 except AssertionError:
7148 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
7151 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7152 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7154 # verify trash dir is clean
7155 self
._wait
_for
_trash
_empty
()
7157 def test_subvolume_snapshot_clone_under_group(self
):
7158 subvolume
= self
._generate
_random
_subvolume
_name
()
7159 snapshot
= self
._generate
_random
_snapshot
_name
()
7160 clone
= self
._generate
_random
_clone
_name
()
7161 group
= self
._generate
_random
_group
_name
()
7164 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7167 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7169 # snapshot subvolume
7170 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7173 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
7176 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--target_group_name', group
)
7178 # check clone status
7179 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=group
)
7182 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_group
=group
)
7185 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7188 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7189 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, group
)
7192 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
7194 # verify trash dir is clean
7195 self
._wait
_for
_trash
_empty
()
7197 def test_subvolume_snapshot_clone_with_attrs(self
):
7198 subvolume
= self
._generate
_random
_subvolume
_name
()
7199 snapshot
= self
._generate
_random
_snapshot
_name
()
7200 clone
= self
._generate
_random
_clone
_name
()
7210 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode", mode
, "--uid", uid
, "--gid", gid
)
7213 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7215 # snapshot subvolume
7216 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7218 # change subvolume attrs (to ensure clone picks up snapshot attrs)
7219 self
._do
_subvolume
_attr
_update
(subvolume
, new_uid
, new_gid
, new_mode
)
7222 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
7224 # check clone status
7225 self
._wait
_for
_clone
_to
_complete
(clone
)
7228 self
._verify
_clone
(subvolume
, snapshot
, clone
)
7231 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7234 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7235 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7237 # verify trash dir is clean
7238 self
._wait
_for
_trash
_empty
()
7240 def test_subvolume_snapshot_clone_with_upgrade(self
):
7242 yet another poor man's upgrade test -- rather than going through a full
7243 upgrade cycle, emulate old types subvolumes by going through the wormhole
7244 and verify clone operation.
7245 further ensure that a legacy volume is not updated to v2, but clone is.
7247 subvolume
= self
._generate
_random
_subvolume
_name
()
7248 snapshot
= self
._generate
_random
_snapshot
_name
()
7249 clone
= self
._generate
_random
_clone
_name
()
7251 # emulate a old-fashioned subvolume
7252 createpath
= os
.path
.join(".", "volumes", "_nogroup", subvolume
)
7253 self
.mount_a
.run_shell_payload(f
"sudo mkdir -p -m 777 {createpath}", omit_sudo
=False)
7255 # add required xattrs to subvolume
7256 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
7257 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
7260 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
7262 # snapshot subvolume
7263 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7265 # ensure metadata file is in legacy location, with required version v1
7266 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume
, version
=1, legacy
=True)
7268 # Insert delay at the beginning of snapshot clone
7269 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7272 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
7274 # snapshot should not be deletable now
7276 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7277 except CommandFailedError
as ce
:
7278 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, msg
="invalid error code when removing source snapshot of a clone")
7280 self
.fail("expected removing source snapshot of a clone to fail")
7282 # check clone status
7283 self
._wait
_for
_clone
_to
_complete
(clone
)
7286 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_version
=1)
7289 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7291 # ensure metadata file is in v2 location, with required version v2
7292 self
._assert
_meta
_location
_and
_version
(self
.volname
, clone
)
7295 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7296 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7298 # verify trash dir is clean
7299 self
._wait
_for
_trash
_empty
()
7301 def test_subvolume_snapshot_reconf_max_concurrent_clones(self
):
7303 Validate 'max_concurrent_clones' config option
7306 # get the default number of cloner threads
7307 default_max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7308 self
.assertEqual(default_max_concurrent_clones
, 4)
7310 # Increase number of cloner threads
7311 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
7312 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7313 self
.assertEqual(max_concurrent_clones
, 6)
7315 # Decrease number of cloner threads
7316 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7317 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7318 self
.assertEqual(max_concurrent_clones
, 2)
7320 def test_subvolume_snapshot_config_snapshot_clone_delay(self
):
7322 Validate 'snapshot_clone_delay' config option
7325 # get the default delay before starting the clone
7326 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7327 self
.assertEqual(default_timeout
, 0)
7329 # Insert delay of 2 seconds at the beginning of the snapshot clone
7330 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7331 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7332 self
.assertEqual(default_timeout
, 2)
7334 # Decrease number of cloner threads
7335 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7336 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7337 self
.assertEqual(max_concurrent_clones
, 2)
7339 def test_subvolume_under_group_snapshot_clone(self
):
7340 subvolume
= self
._generate
_random
_subvolume
_name
()
7341 group
= self
._generate
_random
_group
_name
()
7342 snapshot
= self
._generate
_random
_snapshot
_name
()
7343 clone
= self
._generate
_random
_clone
_name
()
7346 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
7349 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, group
, "--mode=777")
7352 self
._do
_subvolume
_io
(subvolume
, subvolume_group
=group
, number_of_files
=32)
7354 # snapshot subvolume
7355 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
7358 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--group_name', group
)
7360 # check clone status
7361 self
._wait
_for
_clone
_to
_complete
(clone
)
7364 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_group
=group
)
7367 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
7370 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
7371 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7374 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
7376 # verify trash dir is clean
7377 self
._wait
_for
_trash
_empty
()
class TestMisc(TestVolumesHelper):
    """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
    def test_connection_expiration(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now wait for the mgr to expire the connection:
        self.wait_until_evicted(sessions[0]['id'], timeout=90)

    def test_mgr_eviction(self):
        # unmount any cephfs mounts
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].umount_wait()
        sessions = self._session_list()
        self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted

        # Get the mgr to definitely mount cephfs
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)
        sessions = self._session_list()
        self.assertEqual(len(sessions), 1)

        # Now fail the mgr, check the session was evicted
        mgr = self.mgr_cluster.get_active_id()
        self.mgr_cluster.mgr_fail(mgr)
        self.wait_until_evicted(sessions[0]['id'])

    def test_names_can_only_be_goodchars(self):
        """
        Test the creating vols, subvols subvolgroups fails when their names uses
        characters beyond [a-zA-Z0-9 -_.].
        """
        volname, badname = 'testvol', 'abcd@#'

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('volume', 'create', badname)
        self._fs_cmd('volume', 'create', volname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolumegroup', 'create', volname, badname)

        with self.assertRaises(CommandFailedError):
            self._fs_cmd('subvolume', 'create', volname, badname)
        self._fs_cmd('volume', 'rm', volname, '--yes-i-really-mean-it')

    def test_subvolume_ops_on_nonexistent_vol(self):
        # tests the fs subvolume operations on non existing volume

        volname = "non_existent_subvolume"

        # try subvolume operations
        for op in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
            try:
                if op == "resize":
                    self._fs_cmd("subvolume", "resize", volname, "subvolname_1", "inf")
                elif op == "pin":
                    self._fs_cmd("subvolume", "pin", volname, "subvolname_1", "export", "1")
                elif op == "ls":
                    self._fs_cmd("subvolume", "ls", volname)
                else:
                    self._fs_cmd("subvolume", op, volname, "subvolume_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume {0}' command to fail".format(op))

        # try subvolume snapshot operations and clone create
        for op in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1")
                elif op == "clone":
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1", "clone_1")
                else:
                    self._fs_cmd("subvolume", "snapshot", op, volname, "subvolume_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op))

        # try, clone status
        try:
            self._fs_cmd("clone", "status", volname, "clone_1")
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOENT)
        else:
            self.fail("expected the 'fs clone status' command to fail")

        # try subvolumegroup operations
        for op in ("create", "rm", "getpath", "pin", "ls"):
            try:
                if op == "pin":
                    self._fs_cmd("subvolumegroup", "pin", volname, "group_1", "export", "0")
                elif op == "ls":
                    self._fs_cmd("subvolumegroup", op, volname)
                else:
                    self._fs_cmd("subvolumegroup", op, volname, "group_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op))

        # try subvolumegroup snapshot operations
        for op in ("create", "rm", "ls"):
            try:
                if op == "ls":
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1")
                else:
                    self._fs_cmd("subvolumegroup", "snapshot", op, volname, "group_1", "snapshot_1")
            except CommandFailedError as ce:
                self.assertEqual(ce.exitstatus, errno.ENOENT)
            else:
                self.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op))

    def test_subvolume_upgrade_legacy_to_v1(self):
        """
        poor man's upgrade test -- rather than going through a full upgrade cycle,
        emulate subvolumes by going through the wormhole and verify if they are
        accessible.
        further ensure that a legacy volume is not updated to v2.
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- one in the default group and
        # the other in a custom group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # create group
        createpath2 = os.path.join(".", "volumes", group, subvolume2)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)

        # this would auto-upgrade on access without anyone noticing
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline

        subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
        self.assertNotEqual(subvolpath2, None)
        subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline

        # and... the subvolume path returned should be what we created behind the scene
        self.assertEqual(createpath1[1:], subvolpath1)
        self.assertEqual(createpath2[1:], subvolpath2)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_subvolume_no_upgrade_v1_sanity(self):
        """
        poor man's upgrade test -- theme continues...

        This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
        a series of operations on the v1 subvolume to ensure they work as expected.
        """
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                     "type", "uid", "features", "state"]
        snap_md = ["created_at", "data_pool", "has_pending_clones"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone1, clone2 = self._generate_random_clone_name(2)
        mode = "777"
        uid = "1000"
        gid = "1000"

        # emulate a v1 subvolume -- in the default group
        subvolume_path = self._create_v1_subvolume(subvolume)

        # getpath
        subvolpath = self._get_subvolume_path(self.volname, subvolume)
        self.assertEqual(subvolpath, subvolume_path)

        # ls
        subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
        self.assertEqual(subvolumes[0]['name'], subvolume,
                         "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

        # info
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

        self.assertEqual(subvol_info["state"], "complete",
                         msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
        self.assertEqual(len(subvol_info["features"]), 2,
                         msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
        for feature in ['snapshot-clone', 'snapshot-autoprotect']:
            self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

        # resize
        nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
        self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        for md in subvol_md:
            self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
        self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

        # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=8)

        # snap-create
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

        # check clone status
        self._wait_for_clone_to_complete(clone1)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone1, version=2)

        # verify clone
        self._verify_clone(subvolume, snapshot, clone1, source_version=1)

        # clone (older snapshot)
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

        # check clone status
        self._wait_for_clone_to_complete(clone2)

        # ensure clone is v2
        self._assert_meta_location_and_version(self.volname, clone2, version=2)

        # verify clone
        # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
        #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

        # snap-info
        snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
        for md in snap_md:
            self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
        self.assertEqual(snap_info["has_pending_clones"], "no")

        # snap-ls
        subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
        self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
        snapshotnames = [snap['name'] for snap in subvol_snapshots]
        for name in [snapshot, 'fake']:
            self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

        # snap-rm
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

        # ensure volume is still at version 1
        self._assert_meta_location_and_version(self.volname, subvolume, version=1)

        # remove subvolume and clones
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone1)
        self._fs_cmd("subvolume", "rm", self.volname, clone2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_no_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades are not done automatically due to various states of v1
        """
        subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

        # emulate a v1 subvolume -- in a clone pending state
        self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

        # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
        # use clone status, as only certain operations are allowed in pending state
        status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
        self.assertEqual(status["status"]["state"], "pending")

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
        try:
            self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
        else:
            self.fail("expected rm of subvolume undergoing clone to fail")

        # ensure metadata file is in v1 location, with version retained as v1
        self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

        # verify list subvolumes returns an empty list
        subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
        self.assertEqual(len(subvolumels), 0)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_subvolume_upgrade_v1_to_v2(self):
        """
        poor man's upgrade test -- theme continues...
        ensure v1 to v2 upgrades work
        """
        subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
        group = self._generate_random_group_name()

        # emulate a v1 subvolume -- in the default group
        subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

        # emulate a v1 subvolume -- in a custom group
        subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

        # this would attempt auto-upgrade on access
        subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
        self.assertEqual(subvolpath1, subvol1_path)

        subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
        self.assertEqual(subvolpath2, subvol2_path)

        # ensure metadata file is in v2 location, with version retained as v2
        self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
        self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
        self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate handcrafted .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol1, subvol2 = self._generate_random_subvolume_name(2)

        # emulate a old-fashioned subvolume in the default group
        createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # create v2 subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvol2)

        # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
        # .meta into legacy subvol1's root
        subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
        self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False)

        # Upgrade legacy subvol1 to v1
        subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
        self.assertNotEqual(subvolpath1, None)
        subvolpath1 = subvolpath1.rstrip()

        # the subvolume path returned should not be of subvol2 from handcrafted
        # .meta file
        self.assertEqual(createpath1[1:], subvolpath1)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)

        # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
        # path whose '.meta' file is copied to subvol1 root
        authid1 = "alice"
        self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)

        # Validate that the mds path added is of subvol1 and not of subvol2
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
        self.assertEqual("client.alice", out[0]["entity"])
        self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol1)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_binary_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate binary .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create unparseable binary .meta file on legacy subvol's root
        meta_contents = os.urandom(4096)
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore unparseable binary .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
        """
        Validate unparseable text .meta file on legacy subvol root doesn't break the system
        on legacy subvol upgrade to v1
        poor man's upgrade test -- theme continues...
        """
        subvol = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()

        # emulate a old-fashioned subvolume -- in a custom group
        createpath = os.path.join(".", "volumes", group, subvol)
        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

        # add required xattrs to subvolume
        default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

        # Create unparseable text .meta file on legacy subvol's root
        meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
        meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
        self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

        # Upgrade legacy subvol to v1
        subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
        self.assertNotEqual(subvolpath, None)
        subvolpath = subvolpath.rstrip()

        # The legacy subvolume path should be returned for subvol.
        # Should ignore unparseable binary .meta file in subvol's root
        self.assertEqual(createpath[1:], subvolpath)

        # ensure metadata file is in legacy location, with required version v1
        self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
class TestPerModuleFinsherThread(TestVolumesHelper):
    """
    Per module finisher thread tests related to mgr/volume cmds.
    This is used in conjunction with check_counter with min val being 4
    as four subvolume cmds are run
    """
    def test_volumes_module_finisher_thread(self):
        subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
        group = self._generate_random_group_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumes in group
        self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group)
        self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group)

        # remove subvolumes and group
        self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
        self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()