import collections
import errno
import json
import logging
import os
import random
import time
import uuid
from hashlib import md5
from io import StringIO
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)
20 class TestVolumesHelper(CephFSTestCase
):
21 """Helper class for testing FS volume, subvolume group and subvolume operations."""
22 TEST_VOLUME_PREFIX
= "volume"
23 TEST_SUBVOLUME_PREFIX
="subvolume"
24 TEST_GROUP_PREFIX
="group"
25 TEST_SNAPSHOT_PREFIX
="snapshot"
26 TEST_CLONE_PREFIX
="clone"
27 TEST_FILE_NAME_PREFIX
="subvolume_file"
29 # for filling subvolume with data
34 DEFAULT_FILE_SIZE
= 1 # MB
35 DEFAULT_NUMBER_OF_FILES
= 1024
37 def _fs_cmd(self
, *args
):
38 return self
.mgr_cluster
.mon_manager
.raw_cluster_cmd("fs", *args
)
40 def _raw_cmd(self
, *args
):
41 return self
.mgr_cluster
.mon_manager
.raw_cluster_cmd(*args
)
43 def __check_clone_state(self
, state
, clone
, clone_group
=None, timo
=120):
45 args
= ["clone", "status", self
.volname
, clone
]
47 args
.append(clone_group
)
50 result
= json
.loads(self
._fs
_cmd
(*args
))
51 if result
["status"]["state"] == state
:
55 self
.assertTrue(check
< timo
)
57 def _get_clone_status(self
, clone
, clone_group
=None):
58 args
= ["clone", "status", self
.volname
, clone
]
60 args
.append(clone_group
)
62 result
= json
.loads(self
._fs
_cmd
(*args
))
65 def _wait_for_clone_to_complete(self
, clone
, clone_group
=None, timo
=120):
66 self
.__check
_clone
_state
("complete", clone
, clone_group
, timo
)
68 def _wait_for_clone_to_fail(self
, clone
, clone_group
=None, timo
=120):
69 self
.__check
_clone
_state
("failed", clone
, clone_group
, timo
)
71 def _wait_for_clone_to_be_in_progress(self
, clone
, clone_group
=None, timo
=120):
72 self
.__check
_clone
_state
("in-progress", clone
, clone_group
, timo
)
74 def _check_clone_canceled(self
, clone
, clone_group
=None):
75 self
.__check
_clone
_state
("canceled", clone
, clone_group
, timo
=1)
77 def _get_subvolume_snapshot_path(self
, subvolume
, snapshot
, source_group
, subvol_path
, source_version
):
78 if source_version
== 2:
80 if subvol_path
is not None:
81 (base_path
, uuid_str
) = os
.path
.split(subvol_path
)
83 (base_path
, uuid_str
) = os
.path
.split(self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
))
84 return os
.path
.join(base_path
, ".snap", snapshot
, uuid_str
)
87 base_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=source_group
)
88 return os
.path
.join(base_path
, ".snap", snapshot
)
90 def _verify_clone_attrs(self
, source_path
, clone_path
):
94 p
= self
.mount_a
.run_shell(["find", path1
])
95 paths
= p
.stdout
.getvalue().strip().split()
97 # for each entry in source and clone (sink) verify certain inode attributes:
98 # inode type, mode, ownership, [am]time.
99 for source_path
in paths
:
100 sink_entry
= source_path
[len(path1
)+1:]
101 sink_path
= os
.path
.join(path2
, sink_entry
)
104 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', source_path
]).stdout
.getvalue().strip(), 16)
105 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%f', sink_path
]).stdout
.getvalue().strip(), 16)
106 self
.assertEqual(sval
, cval
)
109 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', source_path
]).stdout
.getvalue().strip())
110 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', sink_path
]).stdout
.getvalue().strip())
111 self
.assertEqual(sval
, cval
)
113 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', source_path
]).stdout
.getvalue().strip())
114 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', sink_path
]).stdout
.getvalue().strip())
115 self
.assertEqual(sval
, cval
)
118 # do not check access as kclient will generally not update this like ceph-fuse will.
119 sval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', source_path
]).stdout
.getvalue().strip())
120 cval
= int(self
.mount_a
.run_shell(['stat', '-c' '%Y', sink_path
]).stdout
.getvalue().strip())
121 self
.assertEqual(sval
, cval
)
123 def _verify_clone_root(self
, source_path
, clone_path
, clone
, clone_group
, clone_pool
):
124 # verifies following clone root attrs quota, data_pool and pool_namespace
125 # remaining attributes of clone root are validated in _verify_clone_attrs
127 clone_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
, clone_group
))
129 # verify quota is inherited from source snapshot
130 src_quota
= self
.mount_a
.getfattr(source_path
, "ceph.quota.max_bytes")
131 # FIXME: kclient fails to get this quota value: https://tracker.ceph.com/issues/48075
132 if isinstance(self
.mount_a
, FuseMount
):
133 self
.assertEqual(clone_info
["bytes_quota"], "infinite" if src_quota
is None else int(src_quota
))
136 # verify pool is set as per request
137 self
.assertEqual(clone_info
["data_pool"], clone_pool
)
139 # verify pool and pool namespace are inherited from snapshot
140 self
.assertEqual(clone_info
["data_pool"],
141 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool"))
142 self
.assertEqual(clone_info
["pool_namespace"],
143 self
.mount_a
.getfattr(source_path
, "ceph.dir.layout.pool_namespace"))
145 def _verify_clone(self
, subvolume
, snapshot
, clone
,
146 source_group
=None, clone_group
=None, clone_pool
=None,
147 subvol_path
=None, source_version
=2, timo
=120):
148 # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed
149 # but snapshots are retained for clone verification
150 path1
= self
._get
_subvolume
_snapshot
_path
(subvolume
, snapshot
, source_group
, subvol_path
, source_version
)
151 path2
= self
._get
_subvolume
_path
(self
.volname
, clone
, group_name
=clone_group
)
154 # TODO: currently snapshot rentries are not stable if snapshot source entries
155 # are removed, https://tracker.ceph.com/issues/46747
156 while check
< timo
and subvol_path
is None:
157 val1
= int(self
.mount_a
.getfattr(path1
, "ceph.dir.rentries"))
158 val2
= int(self
.mount_a
.getfattr(path2
, "ceph.dir.rentries"))
163 self
.assertTrue(check
< timo
)
165 self
._verify
_clone
_root
(path1
, path2
, clone
, clone_group
, clone_pool
)
166 self
._verify
_clone
_attrs
(path1
, path2
)
168 def _generate_random_volume_name(self
, count
=1):
169 n
= self
.volume_start
170 volumes
= [f
"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
171 self
.volume_start
+= count
172 return volumes
[0] if count
== 1 else volumes
174 def _generate_random_subvolume_name(self
, count
=1):
175 n
= self
.subvolume_start
176 subvolumes
= [f
"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
177 self
.subvolume_start
+= count
178 return subvolumes
[0] if count
== 1 else subvolumes
180 def _generate_random_group_name(self
, count
=1):
182 groups
= [f
"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
183 self
.group_start
+= count
184 return groups
[0] if count
== 1 else groups
186 def _generate_random_snapshot_name(self
, count
=1):
187 n
= self
.snapshot_start
188 snaps
= [f
"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
189 self
.snapshot_start
+= count
190 return snaps
[0] if count
== 1 else snaps
192 def _generate_random_clone_name(self
, count
=1):
194 clones
= [f
"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i
in range(n
, n
+count
)]
195 self
.clone_start
+= count
196 return clones
[0] if count
== 1 else clones
198 def _enable_multi_fs(self
):
199 self
._fs
_cmd
("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it")
201 def _create_or_reuse_test_volume(self
):
202 result
= json
.loads(self
._fs
_cmd
("volume", "ls"))
204 self
.vol_created
= True
205 self
.volname
= self
._generate
_random
_volume
_name
()
206 self
._fs
_cmd
("volume", "create", self
.volname
)
208 self
.volname
= result
[0]['name']
210 def _get_volume_info(self
, vol_name
):
211 args
= ["volume", "info", vol_name
]
213 vol_md
= self
._fs
_cmd
(*args
)
216 def _get_subvolume_group_path(self
, vol_name
, group_name
):
217 args
= ("subvolumegroup", "getpath", vol_name
, group_name
)
218 path
= self
._fs
_cmd
(*args
)
219 # remove the leading '/', and trailing whitespaces
220 return path
[1:].rstrip()
222 def _get_subvolume_group_info(self
, vol_name
, group_name
):
223 args
= ["subvolumegroup", "info", vol_name
, group_name
]
225 group_md
= self
._fs
_cmd
(*args
)
228 def _get_subvolume_path(self
, vol_name
, subvol_name
, group_name
=None):
229 args
= ["subvolume", "getpath", vol_name
, subvol_name
]
231 args
.append(group_name
)
233 path
= self
._fs
_cmd
(*args
)
234 # remove the leading '/', and trailing whitespaces
235 return path
[1:].rstrip()
237 def _get_subvolume_info(self
, vol_name
, subvol_name
, group_name
=None):
238 args
= ["subvolume", "info", vol_name
, subvol_name
]
240 args
.append(group_name
)
242 subvol_md
= self
._fs
_cmd
(*args
)
245 def _get_subvolume_snapshot_info(self
, vol_name
, subvol_name
, snapname
, group_name
=None):
246 args
= ["subvolume", "snapshot", "info", vol_name
, subvol_name
, snapname
]
248 args
.append(group_name
)
250 snap_md
= self
._fs
_cmd
(*args
)
253 def _delete_test_volume(self
):
254 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
256 def _do_subvolume_pool_and_namespace_update(self
, subvolume
, pool
=None, pool_namespace
=None, subvolume_group
=None):
257 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
260 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool', pool
, sudo
=True)
262 if pool_namespace
is not None:
263 self
.mount_a
.setfattr(subvolpath
, 'ceph.dir.layout.pool_namespace', pool_namespace
, sudo
=True)
265 def _do_subvolume_attr_update(self
, subvolume
, uid
, gid
, mode
, subvolume_group
=None):
266 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
269 self
.mount_a
.run_shell(['chmod', mode
, subvolpath
], sudo
=True)
272 self
.mount_a
.run_shell(['chown', uid
, subvolpath
], sudo
=True)
273 self
.mount_a
.run_shell(['chgrp', gid
, subvolpath
], sudo
=True)
275 def _do_subvolume_io(self
, subvolume
, subvolume_group
=None, create_dir
=None,
276 number_of_files
=DEFAULT_NUMBER_OF_FILES
, file_size
=DEFAULT_FILE_SIZE
):
277 # get subvolume path for IO
278 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
280 args
.append(subvolume_group
)
282 subvolpath
= self
._fs
_cmd
(*args
)
283 self
.assertNotEqual(subvolpath
, None)
284 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
288 io_path
= os
.path
.join(subvolpath
, create_dir
)
289 self
.mount_a
.run_shell_payload(f
"mkdir -p {io_path}")
291 log
.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume
, number_of_files
, file_size
, io_path
))
292 for i
in range(number_of_files
):
293 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
294 self
.mount_a
.write_n_mb(os
.path
.join(io_path
, filename
), file_size
)
296 def _do_subvolume_io_mixed(self
, subvolume
, subvolume_group
=None):
297 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
, group_name
=subvolume_group
)
299 reg_file
= "regfile.0"
300 dir_path
= os
.path
.join(subvolpath
, "dir.0")
301 sym_path1
= os
.path
.join(subvolpath
, "sym.0")
302 # this symlink's ownership would be changed
303 sym_path2
= os
.path
.join(dir_path
, "sym.0")
305 self
.mount_a
.run_shell(["mkdir", dir_path
])
306 self
.mount_a
.run_shell(["ln", "-s", "./{}".format(reg_file
), sym_path1
])
307 self
.mount_a
.run_shell(["ln", "-s", "./{}".format(reg_file
), sym_path2
])
308 # flip ownership to nobody. assumption: nobody's id is 65534
309 self
.mount_a
.run_shell(["chown", "-h", "65534:65534", sym_path2
], sudo
=True, omit_sudo
=False)
311 def _wait_for_trash_empty(self
, timeout
=60):
312 # XXX: construct the trash dir path (note that there is no mgr
313 # [sub]volume interface for this).
314 trashdir
= os
.path
.join("./", "volumes", "_deleting")
315 self
.mount_a
.wait_for_dir_empty(trashdir
, timeout
=timeout
)
317 def _wait_for_subvol_trash_empty(self
, subvol
, group
="_nogroup", timeout
=30):
318 trashdir
= os
.path
.join("./", "volumes", group
, subvol
, ".trash")
320 self
.mount_a
.wait_for_dir_empty(trashdir
, timeout
=timeout
)
321 except CommandFailedError
as ce
:
322 if ce
.exitstatus
!= errno
.ENOENT
:
327 def _assert_meta_location_and_version(self
, vol_name
, subvol_name
, subvol_group
=None, version
=2, legacy
=False):
329 subvol_path
= self
._get
_subvolume
_path
(vol_name
, subvol_name
, group_name
=subvol_group
)
331 m
.update(("/"+subvol_path
).encode('utf-8'))
332 meta_filename
= "{0}.meta".format(m
.digest().hex())
333 metapath
= os
.path
.join(".", "volumes", "_legacy", meta_filename
)
335 group
= subvol_group
if subvol_group
is not None else '_nogroup'
336 metapath
= os
.path
.join(".", "volumes", group
, subvol_name
, ".meta")
338 out
= self
.mount_a
.run_shell(['cat', metapath
], sudo
=True)
339 lines
= out
.stdout
.getvalue().strip().split('\n')
342 if line
== "version = " + str(version
):
345 self
.assertEqual(sv_version
, version
, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
346 version
, sv_version
, metapath
))
348 def _create_v1_subvolume(self
, subvol_name
, subvol_group
=None, has_snapshot
=True, subvol_type
='subvolume', state
='complete'):
349 group
= subvol_group
if subvol_group
is not None else '_nogroup'
350 basepath
= os
.path
.join("volumes", group
, subvol_name
)
351 uuid_str
= str(uuid
.uuid4())
352 createpath
= os
.path
.join(basepath
, uuid_str
)
353 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
355 # create a v1 snapshot, to prevent auto upgrades
357 snappath
= os
.path
.join(createpath
, ".snap", "fake")
358 self
.mount_a
.run_shell(['mkdir', '-p', snappath
], sudo
=True)
360 # add required xattrs to subvolume
361 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
362 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
364 # create a v1 .meta file
365 meta_contents
= "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type
, "/" + createpath
, state
)
366 if state
== 'pending':
367 # add a fake clone source
368 meta_contents
= meta_contents
+ '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
369 meta_filepath1
= os
.path
.join(self
.mount_a
.mountpoint
, basepath
, ".meta")
370 self
.mount_a
.client_remote
.write_file(meta_filepath1
, meta_contents
, sudo
=True)
373 def _update_fake_trash(self
, subvol_name
, subvol_group
=None, trash_name
='fake', create
=True):
374 group
= subvol_group
if subvol_group
is not None else '_nogroup'
375 trashpath
= os
.path
.join("volumes", group
, subvol_name
, '.trash', trash_name
)
377 self
.mount_a
.run_shell(['mkdir', '-p', trashpath
], sudo
=True)
379 self
.mount_a
.run_shell(['rmdir', trashpath
], sudo
=True)
381 def _configure_guest_auth(self
, guest_mount
, authid
, key
):
383 Set up auth credentials for a guest client.
385 # Create keyring file for the guest client.
386 keyring_txt
= dedent("""
390 """.format(authid
=authid
,key
=key
))
392 guest_mount
.client_id
= authid
393 guest_mount
.client_remote
.write_file(guest_mount
.get_keyring_path(),
394 keyring_txt
, sudo
=True)
395 # Add a guest client section to the ceph config file.
396 self
.config_set("client.{0}".format(authid
), "debug client", 20)
397 self
.config_set("client.{0}".format(authid
), "debug objecter", 20)
398 self
.set_conf("client.{0}".format(authid
),
399 "keyring", guest_mount
.get_keyring_path())
401 def _auth_metadata_get(self
, filedata
):
403 Return a deserialized JSON object, or None
406 data
= json
.loads(filedata
)
407 except json
.decoder
.JSONDecodeError
:
412 super(TestVolumesHelper
, self
).setUp()
414 self
.vol_created
= False
415 self
._enable
_multi
_fs
()
416 self
._create
_or
_reuse
_test
_volume
()
417 self
.config_set('mon', 'mon_allow_pool_delete', True)
418 self
.volume_start
= random
.randint(1, (1<<20))
419 self
.subvolume_start
= random
.randint(1, (1<<20))
420 self
.group_start
= random
.randint(1, (1<<20))
421 self
.snapshot_start
= random
.randint(1, (1<<20))
422 self
.clone_start
= random
.randint(1, (1<<20))
426 self
._delete
_test
_volume
()
427 super(TestVolumesHelper
, self
).tearDown()
430 class TestVolumes(TestVolumesHelper
):
431 """Tests for FS volume operations."""
432 def test_volume_create(self
):
434 That the volume can be created and then cleans up
436 volname
= self
._generate
_random
_volume
_name
()
437 self
._fs
_cmd
("volume", "create", volname
)
438 volumels
= json
.loads(self
._fs
_cmd
("volume", "ls"))
440 if not (volname
in ([volume
['name'] for volume
in volumels
])):
441 raise RuntimeError("Error creating volume '{0}'".format(volname
))
444 self
._fs
_cmd
("volume", "rm", volname
, "--yes-i-really-mean-it")
446 def test_volume_ls(self
):
448 That the existing and the newly created volumes can be listed and
451 vls
= json
.loads(self
._fs
_cmd
("volume", "ls"))
452 volumes
= [volume
['name'] for volume
in vls
]
454 #create new volumes and add it to the existing list of volumes
455 volumenames
= self
._generate
_random
_volume
_name
(2)
456 for volumename
in volumenames
:
457 self
._fs
_cmd
("volume", "create", volumename
)
458 volumes
.extend(volumenames
)
462 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
463 if len(volumels
) == 0:
464 raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.")
466 volnames
= [volume
['name'] for volume
in volumels
]
467 if collections
.Counter(volnames
) != collections
.Counter(volumes
):
468 raise RuntimeError("Error creating or listing volumes")
471 for volume
in volumenames
:
472 self
._fs
_cmd
("volume", "rm", volume
, "--yes-i-really-mean-it")
474 def test_volume_rm(self
):
476 That the volume can only be removed when --yes-i-really-mean-it is used
477 and verify that the deleted volume is not listed anymore.
479 for m
in self
.mounts
:
482 self
._fs
_cmd
("volume", "rm", self
.volname
)
483 except CommandFailedError
as ce
:
484 if ce
.exitstatus
!= errno
.EPERM
:
485 raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, "
486 "but it failed with {0}".format(ce
.exitstatus
))
488 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
491 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
492 if (self
.volname
in [volume
['name'] for volume
in volumes
]):
493 raise RuntimeError("Expected the 'fs volume rm' command to succeed. "
494 "The volume {0} not removed.".format(self
.volname
))
496 raise RuntimeError("expected the 'fs volume rm' command to fail.")
498 def test_volume_rm_arbitrary_pool_removal(self
):
500 That the arbitrary pool added to the volume out of band is removed
501 successfully on volume removal.
503 for m
in self
.mounts
:
505 new_pool
= "new_pool"
506 # add arbitrary data pool
507 self
.fs
.add_data_pool(new_pool
)
508 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
509 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
512 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
513 volnames
= [volume
['name'] for volume
in volumes
]
514 self
.assertNotIn(self
.volname
, volnames
)
516 #check if osd pools are gone
517 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
518 for pool
in vol_status
["pools"]:
519 self
.assertNotIn(pool
["name"], pools
)
521 def test_volume_rm_when_mon_delete_pool_false(self
):
523 That the volume can only be removed when mon_allowd_pool_delete is set
524 to true and verify that the pools are removed after volume deletion.
526 for m
in self
.mounts
:
528 self
.config_set('mon', 'mon_allow_pool_delete', False)
530 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
531 except CommandFailedError
as ce
:
532 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
533 "expected the 'fs volume rm' command to fail with EPERM, "
534 "but it failed with {0}".format(ce
.exitstatus
))
535 vol_status
= json
.loads(self
._fs
_cmd
("status", self
.volname
, "--format=json-pretty"))
536 self
.config_set('mon', 'mon_allow_pool_delete', True)
537 self
._fs
_cmd
("volume", "rm", self
.volname
, "--yes-i-really-mean-it")
540 volumes
= json
.loads(self
._fs
_cmd
("volume", "ls", "--format=json-pretty"))
541 volnames
= [volume
['name'] for volume
in volumes
]
542 self
.assertNotIn(self
.volname
, volnames
,
543 "volume {0} exists after removal".format(self
.volname
))
544 #check if pools are gone
545 pools
= json
.loads(self
._raw
_cmd
("osd", "pool", "ls", "--format=json-pretty"))
546 for pool
in vol_status
["pools"]:
547 self
.assertNotIn(pool
["name"], pools
,
548 "pool {0} exists after volume removal".format(pool
["name"]))
550 def test_volume_rename(self
):
552 That volume, its file system and pools, can be renamed.
554 for m
in self
.mounts
:
556 oldvolname
= self
.volname
557 newvolname
= self
._generate
_random
_volume
_name
()
558 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
559 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
560 "--yes-i-really-mean-it")
561 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
562 volnames
= [volume
['name'] for volume
in volumels
]
563 # volume name changed
564 self
.assertIn(newvolname
, volnames
)
565 self
.assertNotIn(oldvolname
, volnames
)
567 self
.fs
.get_pool_names(refresh
=True)
568 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
569 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
571 def test_volume_rename_idempotency(self
):
573 That volume rename is idempotent.
575 for m
in self
.mounts
:
577 oldvolname
= self
.volname
578 newvolname
= self
._generate
_random
_volume
_name
()
579 new_data_pool
, new_metadata_pool
= f
"cephfs.{newvolname}.data", f
"cephfs.{newvolname}.meta"
580 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
581 "--yes-i-really-mean-it")
582 self
._fs
_cmd
("volume", "rename", oldvolname
, newvolname
,
583 "--yes-i-really-mean-it")
584 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
585 volnames
= [volume
['name'] for volume
in volumels
]
586 self
.assertIn(newvolname
, volnames
)
587 self
.assertNotIn(oldvolname
, volnames
)
588 self
.fs
.get_pool_names(refresh
=True)
589 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
590 self
.assertEqual(new_data_pool
, self
.fs
.get_data_pool_name())
592 def test_volume_rename_fails_without_confirmation_flag(self
):
594 That renaming volume fails without --yes-i-really-mean-it flag.
596 newvolname
= self
._generate
_random
_volume
_name
()
598 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
)
599 except CommandFailedError
as ce
:
600 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
601 "invalid error code on renaming a FS volume without the "
602 "'--yes-i-really-mean-it' flag")
604 self
.fail("expected renaming of FS volume to fail without the "
605 "'--yes-i-really-mean-it' flag")
607 def test_volume_rename_for_more_than_one_data_pool(self
):
609 That renaming a volume with more than one data pool does not change
610 the name of the data pools.
612 for m
in self
.mounts
:
614 self
.fs
.add_data_pool('another-data-pool')
615 oldvolname
= self
.volname
616 newvolname
= self
._generate
_random
_volume
_name
()
617 self
.fs
.get_pool_names(refresh
=True)
618 orig_data_pool_names
= list(self
.fs
.data_pools
.values())
619 new_metadata_pool
= f
"cephfs.{newvolname}.meta"
620 self
._fs
_cmd
("volume", "rename", self
.volname
, newvolname
,
621 "--yes-i-really-mean-it")
622 volumels
= json
.loads(self
._fs
_cmd
('volume', 'ls'))
623 volnames
= [volume
['name'] for volume
in volumels
]
624 # volume name changed
625 self
.assertIn(newvolname
, volnames
)
626 self
.assertNotIn(oldvolname
, volnames
)
627 self
.fs
.get_pool_names(refresh
=True)
628 # metadata pool name changed
629 self
.assertEqual(new_metadata_pool
, self
.fs
.get_metadata_pool_name())
630 # data pool names unchanged
631 self
.assertCountEqual(orig_data_pool_names
, list(self
.fs
.data_pools
.values()))
633 def test_volume_info(self
):
635 Tests the 'fs volume info' command
637 vol_fields
= ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
638 group
= self
._generate
_random
_group
_name
()
639 # create subvolumegroup
640 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
641 # get volume metadata
642 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
643 for md
in vol_fields
:
644 self
.assertIn(md
, vol_info
,
645 f
"'{md}' key not present in metadata of volume")
646 self
.assertEqual(vol_info
["used_size"], 0,
647 "Size should be zero when volumes directory is empty")
649 def test_volume_info_without_subvolumegroup(self
):
651 Tests the 'fs volume info' command without subvolume group
653 vol_fields
= ["pools", "mon_addrs"]
654 # get volume metadata
655 vol_info
= json
.loads(self
._get
_volume
_info
(self
.volname
))
656 for md
in vol_fields
:
657 self
.assertIn(md
, vol_info
,
658 f
"'{md}' key not present in metadata of volume")
659 self
.assertNotIn("used_size", vol_info
,
660 "'used_size' should not be present in absence of subvolumegroup")
661 self
.assertNotIn("pending_subvolume_deletions", vol_info
,
662 "'pending_subvolume_deletions' should not be present in absence"
663 " of subvolumegroup")
666 class TestSubvolumeGroups(TestVolumesHelper
):
667 """Tests for FS subvolume group operations."""
668 def test_default_uid_gid_subvolume_group(self
):
669 group
= self
._generate
_random
_group
_name
()
674 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
675 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
677 # check group's uid and gid
678 stat
= self
.mount_a
.stat(group_path
)
679 self
.assertEqual(stat
['st_uid'], expected_uid
)
680 self
.assertEqual(stat
['st_gid'], expected_gid
)
683 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
685 def test_nonexistent_subvolume_group_create(self
):
686 subvolume
= self
._generate
_random
_subvolume
_name
()
687 group
= "non_existent_group"
689 # try, creating subvolume in a nonexistent group
691 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
692 except CommandFailedError
as ce
:
693 if ce
.exitstatus
!= errno
.ENOENT
:
696 raise RuntimeError("expected the 'fs subvolume create' command to fail")
698 def test_nonexistent_subvolume_group_rm(self
):
699 group
= "non_existent_group"
701 # try, remove subvolume group
703 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
704 except CommandFailedError
as ce
:
705 if ce
.exitstatus
!= errno
.ENOENT
:
708 raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail")
710 def test_subvolume_group_create_with_auto_cleanup_on_fail(self
):
711 group
= self
._generate
_random
_group
_name
()
712 data_pool
= "invalid_pool"
713 # create group with invalid data pool layout
714 with self
.assertRaises(CommandFailedError
):
715 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", data_pool
)
717 # check whether group path is cleaned up
719 self
._fs
_cmd
("subvolumegroup", "getpath", self
.volname
, group
)
720 except CommandFailedError
as ce
:
721 if ce
.exitstatus
!= errno
.ENOENT
:
724 raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail")
726 def test_subvolume_group_create_with_desired_data_pool_layout(self
):
727 group1
, group2
= self
._generate
_random
_group
_name
(2)
730 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group1
)
731 group1_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group1
)
733 default_pool
= self
.mount_a
.getfattr(group1_path
, "ceph.dir.layout.pool")
734 new_pool
= "new_pool"
735 self
.assertNotEqual(default_pool
, new_pool
)
738 newid
= self
.fs
.add_data_pool(new_pool
)
740 # create group specifying the new data pool as its pool layout
741 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group2
,
742 "--pool_layout", new_pool
)
743 group2_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group2
)
745 desired_pool
= self
.mount_a
.getfattr(group2_path
, "ceph.dir.layout.pool")
747 self
.assertEqual(desired_pool
, new_pool
)
748 except AssertionError:
749 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
751 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group1
)
752 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group2
)
754 def test_subvolume_group_create_with_desired_mode(self
):
755 group1
, group2
= self
._generate
_random
_group
_name
(2)
757 expected_mode1
= "755"
759 expected_mode2
= "777"
762 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group2
, f
"--mode={expected_mode2}")
763 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group1
)
765 group1_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group1
)
766 group2_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group2
)
767 volumes_path
= os
.path
.dirname(group1_path
)
770 actual_mode1
= self
.mount_a
.run_shell(['stat', '-c' '%a', group1_path
]).stdout
.getvalue().strip()
771 actual_mode2
= self
.mount_a
.run_shell(['stat', '-c' '%a', group2_path
]).stdout
.getvalue().strip()
772 actual_mode3
= self
.mount_a
.run_shell(['stat', '-c' '%a', volumes_path
]).stdout
.getvalue().strip()
773 self
.assertEqual(actual_mode1
, expected_mode1
)
774 self
.assertEqual(actual_mode2
, expected_mode2
)
775 self
.assertEqual(actual_mode3
, expected_mode1
)
777 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group1
)
778 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group2
)
780 def test_subvolume_group_create_with_desired_uid_gid(self
):
782 That the subvolume group can be created with the desired uid and gid and its uid and gid matches the
788 # create subvolume group
789 subvolgroupname
= self
._generate
_random
_group
_name
()
790 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, subvolgroupname
, "--uid", str(uid
), "--gid", str(gid
))
792 # make sure it exists
793 subvolgrouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, subvolgroupname
)
794 self
.assertNotEqual(subvolgrouppath
, None)
796 # verify the uid and gid
797 suid
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', subvolgrouppath
]).stdout
.getvalue().strip())
798 sgid
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', subvolgrouppath
]).stdout
.getvalue().strip())
799 self
.assertEqual(uid
, suid
)
800 self
.assertEqual(gid
, sgid
)
803 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, subvolgroupname
)
805 def test_subvolume_group_create_with_invalid_data_pool_layout(self
):
806 group
= self
._generate
_random
_group
_name
()
807 data_pool
= "invalid_pool"
808 # create group with invalid data pool layout
810 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", data_pool
)
811 except CommandFailedError
as ce
:
812 if ce
.exitstatus
!= errno
.EINVAL
:
815 raise RuntimeError("expected the 'fs subvolumegroup create' command to fail")
817 def test_subvolume_group_create_with_size(self
):
818 # create group with size -- should set quota
819 group
= self
._generate
_random
_group
_name
()
820 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "1000000000")
823 group_info
= json
.loads(self
._get
_subvolume
_group
_info
(self
.volname
, group
))
824 self
.assertEqual(group_info
["bytes_quota"], 1000000000)
827 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
829 def test_subvolume_group_info(self
):
830 # tests the 'fs subvolumegroup info' command
832 group_md
= ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
833 "data_pool", "gid", "mode", "mon_addrs", "mtime", "uid"]
836 group
= self
._generate
_random
_group
_name
()
837 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
840 group_info
= json
.loads(self
._get
_subvolume
_group
_info
(self
.volname
, group
))
842 self
.assertIn(md
, group_info
, "'{0}' key not present in metadata of group".format(md
))
844 self
.assertEqual(group_info
["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
845 self
.assertEqual(group_info
["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
846 self
.assertEqual(group_info
["uid"], 0)
847 self
.assertEqual(group_info
["gid"], 0)
849 nsize
= self
.DEFAULT_FILE_SIZE
*1024*1024
850 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, str(nsize
))
852 # get group metadata after quota set
853 group_info
= json
.loads(self
._get
_subvolume
_group
_info
(self
.volname
, group
))
855 self
.assertIn(md
, group_info
, "'{0}' key not present in metadata of subvolume".format(md
))
857 self
.assertNotEqual(group_info
["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is set")
858 self
.assertEqual(group_info
["bytes_quota"], nsize
, "bytes_quota should be set to '{0}'".format(nsize
))
861 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
863 def test_subvolume_group_create_idempotence(self
):
865 group
= self
._generate
_random
_group
_name
()
866 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
868 # try creating w/ same subvolume group name -- should be idempotent
869 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
872 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
874 def test_subvolume_group_create_idempotence_mode(self
):
876 group
= self
._generate
_random
_group
_name
()
877 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
879 # try creating w/ same subvolume group name with mode -- should set mode
880 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--mode=766")
882 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
884 # check subvolumegroup's mode
885 mode
= self
.mount_a
.run_shell(['stat', '-c' '%a', group_path
]).stdout
.getvalue().strip()
886 self
.assertEqual(mode
, "766")
889 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
891 def test_subvolume_group_create_idempotence_uid_gid(self
):
896 group
= self
._generate
_random
_group
_name
()
897 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
899 # try creating w/ same subvolume group name with uid/gid -- should set uid/gid
900 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--uid", str(desired_uid
), "--gid", str(desired_gid
))
902 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
904 # verify the uid and gid
905 actual_uid
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', group_path
]).stdout
.getvalue().strip())
906 actual_gid
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', group_path
]).stdout
.getvalue().strip())
907 self
.assertEqual(desired_uid
, actual_uid
)
908 self
.assertEqual(desired_gid
, actual_gid
)
911 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
913 def test_subvolume_group_create_idempotence_data_pool(self
):
915 group
= self
._generate
_random
_group
_name
()
916 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
918 group_path
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
920 default_pool
= self
.mount_a
.getfattr(group_path
, "ceph.dir.layout.pool")
921 new_pool
= "new_pool"
922 self
.assertNotEqual(default_pool
, new_pool
)
925 newid
= self
.fs
.add_data_pool(new_pool
)
927 # try creating w/ same subvolume group name with new data pool -- should set pool
928 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--pool_layout", new_pool
)
929 desired_pool
= self
.mount_a
.getfattr(group_path
, "ceph.dir.layout.pool")
931 self
.assertEqual(desired_pool
, new_pool
)
932 except AssertionError:
933 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
936 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
938 def test_subvolume_group_create_idempotence_resize(self
):
940 group
= self
._generate
_random
_group
_name
()
941 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
943 # try creating w/ same subvolume name with size -- should set quota
944 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "1000000000")
947 group_info
= json
.loads(self
._get
_subvolume
_group
_info
(self
.volname
, group
))
948 self
.assertEqual(group_info
["bytes_quota"], 1000000000)
951 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
953 def test_subvolume_group_quota_mds_path_restriction_to_group_path(self
):
955 Tests subvolumegroup quota enforcement with mds path restriction set to group.
956 For quota to be enforced, read permission needs to be provided to the parent
957 of the directory on which quota is set. Please see the tracker comment [1]
958 [1] https://tracker.ceph.com/issues/55090#note-8
960 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*100
961 # create group with 100MB quota
962 group
= self
._generate
_random
_group
_name
()
963 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
964 "--size", str(osize
), "--mode=777")
966 # make sure it exists
967 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
968 self
.assertNotEqual(grouppath
, None)
970 # create subvolume under the group
971 subvolname
= self
._generate
_random
_subvolume
_name
()
972 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
973 "--group_name", group
, "--mode=777")
975 # make sure it exists
976 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
977 self
.assertNotEqual(subvolpath
, None)
980 authid
= "client.guest1"
981 user
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd(
982 "auth", "get-or-create", authid
,
983 "mds", "allow rw path=/volumes",
985 "osd", "allow rw tag cephfs *=*",
987 "--format=json-pretty"
990 # Prepare guest_mount with new authid
991 guest_mount
= self
.mount_b
992 guest_mount
.umount_wait()
994 # configure credentials for guest client
995 self
._configure
_guest
_auth
(guest_mount
, "guest1", user
[0]["key"])
997 # mount the subvolume
998 mount_path
= os
.path
.join("/", subvolpath
)
999 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
1001 # create 99 files of 1MB
1002 guest_mount
.run_shell_payload("mkdir -p dir1")
1004 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
1005 guest_mount
.write_n_mb(os
.path
.join("dir1", filename
), self
.DEFAULT_FILE_SIZE
)
1007 # write two files of 1MB file to exceed the quota
1008 guest_mount
.run_shell_payload("mkdir -p dir2")
1010 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
1011 guest_mount
.write_n_mb(os
.path
.join("dir2", filename
), self
.DEFAULT_FILE_SIZE
)
1012 # For quota to be enforced
1014 # create 400 files of 1MB to exceed quota
1015 for i
in range(400):
1016 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
1017 guest_mount
.write_n_mb(os
.path
.join("dir2", filename
), self
.DEFAULT_FILE_SIZE
)
1018 # Sometimes quota enforcement takes time.
1021 except CommandFailedError
:
1024 self
.fail(f
"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")
1027 guest_mount
.umount_wait()
1029 # Delete the subvolume
1030 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1033 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1035 # verify trash dir is clean
1036 self
._wait
_for
_trash
_empty
()
1038 def test_subvolume_group_quota_mds_path_restriction_to_subvolume_path(self
):
1040 Tests subvolumegroup quota enforcement with mds path restriction set to subvolume path
1041 The quota should not be enforced because of the fourth limitation mentioned at
1042 https://docs.ceph.com/en/latest/cephfs/quota/#limitations
1044 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*100
1045 # create group with 100MB quota
1046 group
= self
._generate
_random
_group
_name
()
1047 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1048 "--size", str(osize
), "--mode=777")
1050 # make sure it exists
1051 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1052 self
.assertNotEqual(grouppath
, None)
1054 # create subvolume under the group
1055 subvolname
= self
._generate
_random
_subvolume
_name
()
1056 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1057 "--group_name", group
, "--mode=777")
1059 # make sure it exists
1060 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1061 self
.assertNotEqual(subvolpath
, None)
1063 mount_path
= os
.path
.join("/", subvolpath
)
1066 authid
= "client.guest1"
1067 user
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd(
1068 "auth", "get-or-create", authid
,
1069 "mds", f
"allow rw path={mount_path}",
1071 "osd", "allow rw tag cephfs *=*",
1073 "--format=json-pretty"
1076 # Prepare guest_mount with new authid
1077 guest_mount
= self
.mount_b
1078 guest_mount
.umount_wait()
1080 # configure credentials for guest client
1081 self
._configure
_guest
_auth
(guest_mount
, "guest1", user
[0]["key"])
1083 # mount the subvolume
1084 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
1086 # create 99 files of 1MB to exceed quota
1087 guest_mount
.run_shell_payload("mkdir -p dir1")
1089 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
1090 guest_mount
.write_n_mb(os
.path
.join("dir1", filename
), self
.DEFAULT_FILE_SIZE
)
1092 # write two files of 1MB file to exceed the quota
1093 guest_mount
.run_shell_payload("mkdir -p dir2")
1095 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
1096 guest_mount
.write_n_mb(os
.path
.join("dir2", filename
), self
.DEFAULT_FILE_SIZE
)
1097 # For quota to be enforced
1099 # create 400 files of 1MB to exceed quota
1100 for i
in range(400):
1101 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, i
)
1102 guest_mount
.write_n_mb(os
.path
.join("dir2", filename
), self
.DEFAULT_FILE_SIZE
)
1103 # Sometimes quota enforcement takes time.
1106 except CommandFailedError
:
1107 self
.fail(f
"Quota should not be enforced, expected filling subvolume {subvolname} with 400 files of size 1MB to succeed")
1110 guest_mount
.umount_wait()
1112 # Delete the subvolume
1113 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1116 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1118 # verify trash dir is clean
1119 self
._wait
_for
_trash
_empty
()
1121 def test_subvolume_group_quota_exceeded_subvolume_removal(self
):
1123 Tests subvolume removal if it's group quota is exceeded
1125 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*100
1126 # create group with 100MB quota
1127 group
= self
._generate
_random
_group
_name
()
1128 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1129 "--size", str(osize
), "--mode=777")
1131 # make sure it exists
1132 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1133 self
.assertNotEqual(grouppath
, None)
1135 # create subvolume under the group
1136 subvolname
= self
._generate
_random
_subvolume
_name
()
1137 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1138 "--group_name", group
, "--mode=777")
1140 # make sure it exists
1141 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1142 self
.assertNotEqual(subvolpath
, None)
1144 # create 99 files of 1MB to exceed quota
1145 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, number_of_files
=99)
1148 # write two files of 1MB file to exceed the quota
1149 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=2)
1150 # For quota to be enforced
1152 # create 400 files of 1MB to exceed quota
1153 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=400)
1154 except CommandFailedError
:
1155 # Delete subvolume when group quota is exceeded
1156 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1158 self
.fail(f
"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")
1161 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1163 # verify trash dir is clean
1164 self
._wait
_for
_trash
_empty
()
1166 def test_subvolume_group_quota_exceeded_subvolume_removal_retained_snaps(self
):
1168 Tests retained snapshot subvolume removal if it's group quota is exceeded
1170 group
= self
._generate
_random
_group
_name
()
1171 subvolname
= self
._generate
_random
_subvolume
_name
()
1172 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
1174 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*100
1175 # create group with 100MB quota
1176 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1177 "--size", str(osize
), "--mode=777")
1179 # make sure it exists
1180 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1181 self
.assertNotEqual(grouppath
, None)
1183 # create subvolume under the group
1184 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1185 "--group_name", group
, "--mode=777")
1187 # make sure it exists
1188 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1189 self
.assertNotEqual(subvolpath
, None)
1191 # create 99 files of 1MB to exceed quota
1192 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, number_of_files
=99)
1194 # snapshot subvolume
1195 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot1
, "--group_name", group
)
1196 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot2
, "--group_name", group
)
1199 # write two files of 1MB file to exceed the quota
1200 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=2)
1201 # For quota to be enforced
1203 # create 400 files of 1MB to exceed quota
1204 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, number_of_files
=400)
1205 except CommandFailedError
:
1206 # remove with snapshot retention
1207 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
, "--retain-snapshots")
1209 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot1
, "--group_name", group
)
1210 # remove snapshot2 (should remove volume)
1211 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot2
, "--group_name", group
)
1212 # verify subvolume trash is clean
1213 self
._wait
_for
_subvol
_trash
_empty
(subvolname
, group
=group
)
1215 self
.fail(f
"expected filling subvolume {subvolname} with 400 files of size 1MB to fail")
1218 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1220 # verify trash dir is clean
1221 self
._wait
_for
_trash
_empty
()
1223 def test_subvolume_group_quota_subvolume_removal(self
):
1225 Tests subvolume removal if it's group quota is set.
1227 # create group with size -- should set quota
1228 group
= self
._generate
_random
_group
_name
()
1229 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "1000000000")
1231 # create subvolume under the group
1232 subvolname
= self
._generate
_random
_subvolume
_name
()
1233 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
1237 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1238 except CommandFailedError
:
1239 self
.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")
1241 # remove subvolumegroup
1242 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1244 # verify trash dir is clean
1245 self
._wait
_for
_trash
_empty
()
1247 def test_subvolume_group_quota_legacy_subvolume_removal(self
):
1249 Tests legacy subvolume removal if it's group quota is set.
1251 subvolume
= self
._generate
_random
_subvolume
_name
()
1252 group
= self
._generate
_random
_group
_name
()
1254 # emulate a old-fashioned subvolume -- in a custom group
1255 createpath1
= os
.path
.join(".", "volumes", group
, subvolume
)
1256 self
.mount_a
.run_shell(['mkdir', '-p', createpath1
], sudo
=True)
1258 # this would auto-upgrade on access without anyone noticing
1259 subvolpath1
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
, "--group-name", group
)
1260 self
.assertNotEqual(subvolpath1
, None)
1261 subvolpath1
= subvolpath1
.rstrip() # remove "/" prefix and any trailing newline
1263 # and... the subvolume path returned should be what we created behind the scene
1264 self
.assertEqual(createpath1
[1:], subvolpath1
)
1266 # Set subvolumegroup quota on idempotent subvolumegroup creation
1267 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "1000000000")
1271 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
1272 except CommandFailedError
:
1273 self
.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")
1275 # remove subvolumegroup
1276 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1278 # verify trash dir is clean
1279 self
._wait
_for
_trash
_empty
()
1281 def test_subvolume_group_quota_v1_subvolume_removal(self
):
1283 Tests v1 subvolume removal if it's group quota is set.
1285 subvolume
= self
._generate
_random
_subvolume
_name
()
1286 group
= self
._generate
_random
_group
_name
()
1288 # emulate a v1 subvolume -- in a custom group
1289 self
._create
_v
1_subvolume
(subvolume
, subvol_group
=group
, has_snapshot
=False)
1291 # Set subvolumegroup quota on idempotent subvolumegroup creation
1292 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "1000000000")
1296 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
1297 except CommandFailedError
:
1298 self
.fail("expected the 'fs subvolume rm' command to succeed if group quota is set")
1300 # remove subvolumegroup
1301 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1303 # verify trash dir is clean
1304 self
._wait
_for
_trash
_empty
()
1306 def test_subvolume_group_resize_fail_invalid_size(self
):
1308 That a subvolume group cannot be resized to an invalid size and the quota did not change
1311 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
1312 # create group with 1MB quota
1313 group
= self
._generate
_random
_group
_name
()
1314 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--size", str(osize
))
1316 # make sure it exists
1317 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1318 self
.assertNotEqual(grouppath
, None)
1320 # try to resize the subvolume with an invalid size -10
1323 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, str(nsize
))
1324 except CommandFailedError
as ce
:
1325 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
,
1326 "invalid error code on resize of subvolume group with invalid size")
1328 self
.fail("expected the 'fs subvolumegroup resize' command to fail")
1330 # verify the quota did not change
1331 size
= int(self
.mount_a
.getfattr(grouppath
, "ceph.quota.max_bytes"))
1332 self
.assertEqual(size
, osize
)
1335 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1337 def test_subvolume_group_resize_fail_zero_size(self
):
1339 That a subvolume group cannot be resized to a zero size and the quota did not change
1342 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
1343 # create group with 1MB quota
1344 group
= self
._generate
_random
_group
_name
()
1345 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--size", str(osize
))
1347 # make sure it exists
1348 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1349 self
.assertNotEqual(grouppath
, None)
1351 # try to resize the subvolume group with size 0
1354 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, str(nsize
))
1355 except CommandFailedError
as ce
:
1356 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
,
1357 "invalid error code on resize of subvolume group with invalid size")
1359 self
.fail("expected the 'fs subvolumegroup resize' command to fail")
1361 # verify the quota did not change
1362 size
= int(self
.mount_a
.getfattr(grouppath
, "ceph.quota.max_bytes"))
1363 self
.assertEqual(size
, osize
)
1366 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1368 def test_subvolume_group_resize_quota_lt_used_size(self
):
1370 That a subvolume group can be resized to a size smaller than the current used size
1371 and the resulting quota matches the expected size.
1374 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
1375 # create group with 20MB quota
1376 group
= self
._generate
_random
_group
_name
()
1377 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1378 "--size", str(osize
), "--mode=777")
1380 # make sure it exists
1381 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1382 self
.assertNotEqual(grouppath
, None)
1384 # create subvolume under the group
1385 subvolname
= self
._generate
_random
_subvolume
_name
()
1386 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1387 "--group_name", group
, "--mode=777")
1389 # make sure it exists
1390 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1391 self
.assertNotEqual(subvolpath
, None)
1393 # create one file of 10MB
1394 file_size
=self
.DEFAULT_FILE_SIZE
*10
1396 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
1399 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+1)
1400 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
1402 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
1404 # shrink the subvolume group
1405 nsize
= usedsize
// 2
1407 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, str(nsize
))
1408 except CommandFailedError
:
1409 self
.fail("expected the 'fs subvolumegroup resize' command to succeed")
1412 size
= int(self
.mount_a
.getfattr(grouppath
, "ceph.quota.max_bytes"))
1413 self
.assertEqual(size
, nsize
)
1415 # remove subvolume and group
1416 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1417 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1419 # verify trash dir is clean
1420 self
._wait
_for
_trash
_empty
()
1422 def test_subvolume_group_resize_fail_quota_lt_used_size_no_shrink(self
):
1424 That a subvolume group cannot be resized to a size smaller than the current used size
1425 when --no_shrink is given and the quota did not change.
1428 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
1429 # create group with 20MB quota
1430 group
= self
._generate
_random
_group
_name
()
1431 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1432 "--size", str(osize
), "--mode=777")
1434 # make sure it exists
1435 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1436 self
.assertNotEqual(grouppath
, None)
1438 # create subvolume under the group
1439 subvolname
= self
._generate
_random
_subvolume
_name
()
1440 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1441 "--group_name", group
, "--mode=777")
1443 # make sure it exists
1444 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1445 self
.assertNotEqual(subvolpath
, None)
1447 # create one file of 10MB
1448 file_size
=self
.DEFAULT_FILE_SIZE
*10
1450 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
1453 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+2)
1454 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
1456 usedsize
= int(self
.mount_a
.getfattr(grouppath
, "ceph.dir.rbytes"))
1458 # shrink the subvolume group
1459 nsize
= usedsize
// 2
1461 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, str(nsize
), "--no_shrink")
1462 except CommandFailedError
as ce
:
1463 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolumegroup with quota less than used")
1465 self
.fail("expected the 'fs subvolumegroup resize' command to fail")
1467 # verify the quota did not change
1468 size
= int(self
.mount_a
.getfattr(grouppath
, "ceph.quota.max_bytes"))
1469 self
.assertEqual(size
, osize
)
1471 # remove subvolume and group
1472 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1473 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1475 # verify trash dir is clean
1476 self
._wait
_for
_trash
_empty
()
1478 def test_subvolume_group_resize_expand_on_full_subvolume(self
):
1480 That the subvolume group can be expanded after it is full and future write succeed
1483 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*100
1484 # create group with 100MB quota
1485 group
= self
._generate
_random
_group
_name
()
1486 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1487 "--size", str(osize
), "--mode=777")
1489 # make sure it exists
1490 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1491 self
.assertNotEqual(grouppath
, None)
1493 # create subvolume under the group
1494 subvolname
= self
._generate
_random
_subvolume
_name
()
1495 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1496 "--group_name", group
, "--mode=777")
1498 # make sure it exists
1499 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1500 self
.assertNotEqual(subvolpath
, None)
1502 # create 99 files of 1MB
1503 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, number_of_files
=99)
1506 # write two files of 1MB file to exceed the quota
1507 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=2)
1508 # For quota to be enforced
1510 # create 500 files of 1MB
1511 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=500)
1512 except CommandFailedError
:
1513 # Not able to write. So expand the subvolumegroup more and try writing the files again
1515 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, str(nsize
))
1517 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=500)
1518 except CommandFailedError
:
1519 self
.fail("expected filling subvolume {0} with 500 files of size 1MB "
1520 "to succeed".format(subvolname
))
1522 self
.fail("expected filling subvolume {0} with 500 files of size 1MB "
1523 "to fail".format(subvolname
))
1525 # remove subvolume and group
1526 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1527 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1529 # verify trash dir is clean
1530 self
._wait
_for
_trash
_empty
()
1532 def test_subvolume_group_resize_infinite_size(self
):
1534 That a subvolume group can be resized to an infinite size by unsetting its quota.
1537 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
1539 group
= self
._generate
_random
_group
_name
()
1540 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1541 "--size", str(osize
))
1543 # make sure it exists
1544 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1545 self
.assertNotEqual(grouppath
, None)
1548 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, "inf")
1550 # verify that the quota is None
1551 size
= self
.mount_a
.getfattr(grouppath
, "ceph.quota.max_bytes")
1552 self
.assertEqual(size
, None)
1554 # remove subvolume group
1555 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1557 def test_subvolume_group_resize_infinite_size_future_writes(self
):
1559 That a subvolume group can be resized to an infinite size and the future writes succeed.
1562 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*5
1563 # create group with 5MB quota
1564 group
= self
._generate
_random
_group
_name
()
1565 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
,
1566 "--size", str(osize
), "--mode=777")
1568 # make sure it exists
1569 grouppath
= self
._get
_subvolume
_group
_path
(self
.volname
, group
)
1570 self
.assertNotEqual(grouppath
, None)
1572 # create subvolume under the group
1573 subvolname
= self
._generate
_random
_subvolume
_name
()
1574 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
,
1575 "--group_name", group
, "--mode=777")
1577 # make sure it exists
1578 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
, group_name
=group
)
1579 self
.assertNotEqual(subvolpath
, None)
1581 # create 4 files of 1MB
1582 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, number_of_files
=4)
1585 # write two files of 1MB file to exceed the quota
1586 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=2)
1587 # For quota to be enforced
1589 # create 500 files of 1MB
1590 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=500)
1591 except CommandFailedError
:
1592 # Not able to write. So resize subvolumegroup to 'inf' and try writing the files again
1594 self
._fs
_cmd
("subvolumegroup", "resize", self
.volname
, group
, "inf")
1596 self
._do
_subvolume
_io
(subvolname
, subvolume_group
=group
, create_dir
='dir1', number_of_files
=500)
1597 except CommandFailedError
:
1598 self
.fail("expected filling subvolume {0} with 500 files of size 1MB "
1599 "to succeed".format(subvolname
))
1601 self
.fail("expected filling subvolume {0} with 500 files of size 1MB "
1602 "to fail".format(subvolname
))
1605 # verify that the quota is None
1606 size
= self
.mount_a
.getfattr(grouppath
, "ceph.quota.max_bytes")
1607 self
.assertEqual(size
, None)
1609 # remove subvolume and group
1610 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, "--group_name", group
)
1611 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
1613 # verify trash dir is clean
1614 self
._wait
_for
_trash
_empty
()
1616 def test_subvolume_group_ls(self
):
1617 # tests the 'fs subvolumegroup ls' command
1619 subvolumegroups
= []
1621 #create subvolumegroups
1622 subvolumegroups
= self
._generate
_random
_group
_name
(3)
1623 for groupname
in subvolumegroups
:
1624 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, groupname
)
1626 subvolumegroupls
= json
.loads(self
._fs
_cmd
('subvolumegroup', 'ls', self
.volname
))
1627 if len(subvolumegroupls
) == 0:
1628 raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups")
1630 subvolgroupnames
= [subvolumegroup
['name'] for subvolumegroup
in subvolumegroupls
]
1631 if collections
.Counter(subvolgroupnames
) != collections
.Counter(subvolumegroups
):
1632 raise RuntimeError("Error creating or listing subvolume groups")
def test_subvolume_group_ls_filter(self):
    # tests that the 'fs subvolumegroup ls' command filters out the internal
    # '_deleting' directory
    # (the original also pre-initialized subvolumegroups to []; the binding was
    # immediately overwritten, so the dead assignment is dropped here)

    # create subvolume groups
    subvolumegroups = self._generate_random_group_name(3)
    for groupname in subvolumegroups:
        self._fs_cmd("subvolumegroup", "create", self.volname, groupname)

    # create a subvolume and remove it. Removal populates the '_deleting' directory.
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls]
    if "_deleting" in subvolgroupnames:
        self.fail("Listing subvolume groups listed '_deleting' directory")
1654 def test_subvolume_group_ls_filter_internal_directories(self
):
1655 # tests the 'fs subvolumegroup ls' command filters internal directories
1656 # eg: '_deleting', '_nogroup', '_index', "_legacy"
1658 subvolumegroups
= self
._generate
_random
_group
_name
(3)
1659 subvolume
= self
._generate
_random
_subvolume
_name
()
1660 snapshot
= self
._generate
_random
_snapshot
_name
()
1661 clone
= self
._generate
_random
_clone
_name
()
1663 #create subvolumegroups
1664 for groupname
in subvolumegroups
:
1665 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, groupname
)
1667 # create subvolume which will create '_nogroup' directory
1668 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
1671 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
1673 # clone snapshot which will create '_index' directory
1674 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
1677 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
1679 # remove subvolume which will create '_deleting' directory
1680 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
1682 # list subvolumegroups
1683 ret
= json
.loads(self
._fs
_cmd
('subvolumegroup', 'ls', self
.volname
))
1684 self
.assertEqual(len(ret
), len(subvolumegroups
))
1686 ret_list
= [subvolumegroup
['name'] for subvolumegroup
in ret
]
1687 self
.assertEqual(len(ret_list
), len(subvolumegroups
))
1689 self
.assertEqual(all(elem
in subvolumegroups
for elem
in ret_list
), True)
def test_subvolume_group_ls_for_nonexistent_volume(self):
    # tests the 'fs subvolumegroup ls' command when no group exists
    # prerequisite: the test volume is created and a subvolumegroup is NOT created
    listing = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname))
    if listing:
        raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list")
1700 def test_subvolumegroup_pin_distributed(self
):
1701 self
.fs
.set_max_mds(2)
1702 status
= self
.fs
.wait_for_daemons()
1703 self
.config_set('mds', 'mds_export_ephemeral_distributed', True)
1706 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
1707 self
._fs
_cmd
("subvolumegroup", "pin", self
.volname
, group
, "distributed", "True")
1708 subvolumes
= self
._generate
_random
_subvolume
_name
(50)
1709 for subvolume
in subvolumes
:
1710 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
1711 self
._wait
_distributed
_subtrees
(2 * 2, status
=status
, rank
="all")
1714 for subvolume
in subvolumes
:
1715 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
1717 # verify trash dir is clean
1718 self
._wait
_for
_trash
_empty
()
def test_subvolume_group_rm_force(self):
    # removing a non-existent subvolume group with --force must not fail
    group = self._generate_random_group_name()
    try:
        self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force")
    except CommandFailedError:
        raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed")
def test_subvolume_group_exists_with_subvolumegroup_and_no_subvolume(self):
    """Test the presence of any subvolumegroup when only subvolumegroup is present"""
    group = self._generate_random_group_name()

    # create the subvolumegroup; 'exist' must now report it
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "subvolumegroup exists")

    # delete the subvolumegroup; 'exist' must now report none
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_with_no_subvolumegroup_and_subvolume(self):
    """Test the presence of any subvolumegroup when no subvolumegroup is present"""
    # nothing was created, so 'exist' must report no group
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_with_subvolumegroup_and_subvolume(self):
    """Test the presence of any subvolume when subvolumegroup
    and subvolume both are present"""
    group = self._generate_random_group_name()
    subvolume = self._generate_random_subvolume_name(2)

    # create a subvolumegroup, one subvolume inside it and one outside any group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)
    self._fs_cmd("subvolume", "create", self.volname, subvolume[0], "--group_name", group)
    self._fs_cmd("subvolume", "create", self.volname, subvolume[1])

    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "subvolumegroup exists")

    # removing the grouped subvolume does not remove the group itself
    self._fs_cmd("subvolume", "rm", self.volname, subvolume[0], "--group_name", group)
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "subvolumegroup exists")

    # removing the ungrouped subvolume still leaves the group in place
    self._fs_cmd("subvolume", "rm", self.volname, subvolume[1])
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "subvolumegroup exists")

    # only deleting the subvolumegroup flips the answer
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "no subvolumegroup exists")
def test_subvolume_group_exists_without_subvolumegroup_and_with_subvolume(self):
    """Test the presence of any subvolume when subvolume is present
    but no subvolumegroup is present"""
    subvolume = self._generate_random_subvolume_name()

    # an ungrouped subvolume must not make 'subvolumegroup exist' report a group
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "no subvolumegroup exists")

    self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    result = self._fs_cmd("subvolumegroup", "exist", self.volname)
    self.assertEqual(result.strip('\n'), "no subvolumegroup exists")
1789 class TestSubvolumes(TestVolumesHelper
):
1790 """Tests for FS subvolume operations, except snapshot and snapshot clone."""
def test_async_subvolume_rm(self):
    # subvolume removal is asynchronous: queue many removals, then wait for the
    # trash (purge) machinery to drain
    subvolumes = self._generate_random_subvolume_name(100)

    # create the subvolumes and put a little data in each
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
        self._do_subvolume_io(subvolume, number_of_files=10)

    self.mount_a.umount_wait()

    # queue removal for every subvolume
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    self.mount_a.mount_wait()

    # verify trash dir is clean (generous timeout: 100 purges)
    self._wait_for_trash_empty(timeout=300)
1810 def test_default_uid_gid_subvolume(self
):
1811 subvolume
= self
._generate
_random
_subvolume
_name
()
1816 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
1817 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
1819 # check subvolume's uid and gid
1820 stat
= self
.mount_a
.stat(subvol_path
)
1821 self
.assertEqual(stat
['st_uid'], expected_uid
)
1822 self
.assertEqual(stat
['st_gid'], expected_gid
)
1825 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
1827 # verify trash dir is clean
1828 self
._wait
_for
_trash
_empty
()
def test_nonexistent_subvolume_rm(self):
    # removing a non-existent subvolume must fail with ENOENT
    subvolume = "non_existent_subvolume"

    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    except CommandFailedError as ce:
        # any failure other than ENOENT is unexpected -- propagate it
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume rm' command to fail")
def test_subvolume_create_and_rm(self):
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # make sure it exists
    subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    self.assertNotEqual(subvolpath, None)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # make sure its gone -- getpath must now fail with ENOENT
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        if ce.exitstatus != errno.ENOENT:
            raise
    else:
        raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_rm_in_group(self):
    subvolume = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

    # remove subvolume (group name passed positionally)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_create_idempotence(self):
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # creating it again with the same name must be idempotent (no error)
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_resize(self):
    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # re-creating with a size should set the quota on the existing subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000")

    # get subvolume metadata and confirm the quota took effect
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertEqual(subvol_info["bytes_quota"], 1000000000)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_mode(self):
    # default mode of a freshly created subvolume
    default_mode = "755"

    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, default_mode)

    # try creating w/ same subvolume name with --mode 777
    new_mode = "777"
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", new_mode)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, new_mode)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_idempotence_without_passing_mode(self):
    # create subvolume with an explicit non-default mode
    desired_mode = "777"
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", desired_mode)

    subvol_path = self._get_subvolume_path(self.volname, subvolume)

    actual_mode_1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_1, desired_mode)

    default_mode = "755"

    # re-create with the same name but no --mode; per the assertion below the
    # mode reverts to the default
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    actual_mode_2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode_2, default_mode)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_isolated_namespace(self):
    """
    Create subvolume in separate rados namespace
    """
    # create subvolume with namespace isolation
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated")

    # get subvolume metadata and verify the isolated pool namespace
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    self.assertNotEqual(len(subvol_info), 0)
    self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_auto_cleanup_on_fail(self):
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"

    # create subvolume with invalid data pool layout fails
    with self.assertRaises(CommandFailedError):
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)

    # check whether subvol path is cleaned up
    try:
        self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume")
    else:
        self.fail("expected the 'fs subvolume getpath' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_data_pool_layout_in_group(self):
    subvol1, subvol2 = self._generate_random_subvolume_name(2)
    group = self._generate_random_group_name()

    # create group. this also helps set default pool layout for subvolumes
    # created within the group.
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolume in group.
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)

    default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool")
    new_pool = "new_pool"
    self.assertNotEqual(default_pool, new_pool)

    # add a fresh data pool to the volume
    newid = self.fs.add_data_pool(new_pool)

    # create subvolume specifying the new data pool as its pool layout
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group,
                 "--pool_layout", new_pool)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)

    desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool")
    try:
        self.assertEqual(desired_pool, new_pool)
    except AssertionError:
        self.assertEqual(int(desired_pool), newid) # old kernel returns id

    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode(self):
    subvol1 = self._generate_random_subvolume_name()

    # default mode expected on ancestor directories
    default_mode = "755"
    # mode requested for the subvolume itself
    desired_mode = "777"

    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--mode", "777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1)

    # check subvolumegroup's mode
    subvol_par_path = os.path.dirname(subvol1_path)
    group_path = os.path.dirname(subvol_par_path)
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, default_mode)
    # check /volumes mode
    volumes_path = os.path.dirname(group_path)
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', volumes_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode2, default_mode)
    # check subvolume's mode
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode3, desired_mode)

    self._fs_cmd("subvolume", "rm", self.volname, subvol1)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_desired_mode_in_group(self):
    subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
    group = self._generate_random_group_name()

    # default mode
    expected_mode1 = "755"
    # desired mode
    expected_mode2 = "777"

    # create group
    self._fs_cmd("subvolumegroup", "create", self.volname, group)

    # create subvolumes in group: default mode, --mode 777, and --mode 0777
    self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
    self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777")
    # check whether mode 0777 also works
    self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777")

    subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group)
    subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group)
    subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group)

    # check each subvolume's mode
    actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip()
    actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip()
    actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip()
    self.assertEqual(actual_mode1, expected_mode1)
    self.assertEqual(actual_mode2, expected_mode2)
    self.assertEqual(actual_mode3, expected_mode2)

    self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
    self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
2112 def test_subvolume_create_with_desired_uid_gid(self
):
2114 That the subvolume can be created with the desired uid and gid and its uid and gid matches the
2121 subvolname
= self
._generate
_random
_subvolume
_name
()
2122 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--uid", str(uid
), "--gid", str(gid
))
2124 # make sure it exists
2125 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2126 self
.assertNotEqual(subvolpath
, None)
2128 # verify the uid and gid
2129 suid
= int(self
.mount_a
.run_shell(['stat', '-c' '%u', subvolpath
]).stdout
.getvalue().strip())
2130 sgid
= int(self
.mount_a
.run_shell(['stat', '-c' '%g', subvolpath
]).stdout
.getvalue().strip())
2131 self
.assertEqual(uid
, suid
)
2132 self
.assertEqual(gid
, sgid
)
2135 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2137 # verify trash dir is clean
2138 self
._wait
_for
_trash
_empty
()
def test_subvolume_create_with_invalid_data_pool_layout(self):
    subvolume = self._generate_random_subvolume_name()
    data_pool = "invalid_pool"

    # create subvolume with invalid data pool layout -- must fail with EINVAL
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_with_invalid_size(self):
    # create subvolume with an invalid size -1 -- must fail with EINVAL
    subvolume = self._generate_random_subvolume_name()
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size")
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_create_and_ls_providing_group_as_nogroup(self):
    """
    That a 'subvolume create' and 'subvolume ls' should throw
    permission denied error if option --group=_nogroup is provided.
    """
    subvolname = self._generate_random_subvolume_name()

    # try to create subvolume providing --group_name=_nogroup option
    try:
        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--group_name", "_nogroup")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM)
    else:
        self.fail("expected the 'fs subvolume create' command to fail")

    # create subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvolname)

    # try to list subvolumes providing --group_name=_nogroup option
    try:
        self._fs_cmd("subvolume", "ls", self.volname, "--group_name", "_nogroup")
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EPERM)
    else:
        self.fail("expected the 'fs subvolume ls' command to fail")

    # listing without the internal group name is fine
    self._fs_cmd("subvolume", "ls", self.volname)

    self._fs_cmd("subvolume", "rm", self.volname, subvolname)

    # verify trash dir is clean.
    self._wait_for_trash_empty()
2202 def test_subvolume_expand(self
):
2204 That a subvolume can be expanded in size and its quota matches the expected size.
2208 subvolname
= self
._generate
_random
_subvolume
_name
()
2209 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
2210 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
2212 # make sure it exists
2213 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
2214 self
.assertNotEqual(subvolpath
, None)
2216 # expand the subvolume
2218 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
2221 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
2222 self
.assertEqual(size
, nsize
)
2225 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
2227 # verify trash dir is clean
2228 self
._wait
_for
_trash
_empty
()
def test_subvolume_info(self):
    # tests the 'fs subvolume info' command

    # metadata keys every subvolume must expose
    subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                 "type", "uid", "features", "state"]

    # create subvolume
    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # get subvolume metadata (no quota set yet)
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
    self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # set a quota and re-check
    nsize = self.DEFAULT_FILE_SIZE*1024*1024
    self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))

    # get subvolume metadata after quota set
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
    self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
    self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
    self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")

    self.assertEqual(len(subvol_info["features"]), 3,
                     msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_ls(self):
    # tests the 'fs subvolume ls' command

    # create subvolumes
    subvolumes = self._generate_random_subvolume_name(3)
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

    # list subvolumes
    subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    if len(subvolumels) == 0:
        self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.")
    else:
        subvolnames = [subvolume['name'] for subvolume in subvolumels]
        if collections.Counter(subvolnames) != collections.Counter(subvolumes):
            self.fail("Error creating or listing subvolumes")

    # remove subvolumes
    for subvolume in subvolumes:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_ls_with_groupname_as_internal_directory(self):
    # tests the 'fs subvolume ls' command when the group name is an internal directory
    # Eg: '_nogroup', '_legacy', '_deleting', '_index'.
    # Expecting 'fs subvolume ls' to fail with errno EINVAL for '_legacy', '_deleting', '_index'
    # and with errno EPERM for '_nogroup'.
    #
    # the four hand-copied try/except blocks are collapsed into one data-driven
    # loop; the failure messages are preserved verbatim via errno.errorcode.
    for internal_dir, expected_errno in (("_nogroup", errno.EPERM),
                                         ("_legacy", errno.EINVAL),
                                         ("_deleting", errno.EINVAL),
                                         ("_index", errno.EINVAL)):
        try:
            self._fs_cmd("subvolume", "ls", self.volname, "--group_name", internal_dir)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, expected_errno)
        else:
            self.fail("expected the 'fs subvolume ls' command to fail with error "
                      "'{0}' for {1}".format(errno.errorcode[expected_errno], internal_dir))
def test_subvolume_ls_for_notexistent_default_group(self):
    # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist
    # prerequisite: we expect that the volume is created and the default group _nogroup is
    # NOT created (i.e. a subvolume without group is not created)
    listing = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    if listing:
        raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.")
2354 def test_subvolume_marked(self
):
2356 ensure a subvolume is marked with the ceph.dir.subvolume xattr
2358 subvolume
= self
._generate
_random
_subvolume
_name
()
2361 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
2364 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
2366 # subdirectory of a subvolume cannot be moved outside the subvolume once marked with
2367 # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation)
2368 # outside the subvolume
2369 dstpath
= os
.path
.join(self
.mount_a
.mountpoint
, 'volumes', '_nogroup', 'new_subvol_location')
2370 srcpath
= os
.path
.join(self
.mount_a
.mountpoint
, subvolpath
)
2371 rename_script
= dedent("""
2375 os.rename("{src}", "{dst}")
2376 except OSError as e:
2377 if e.errno != errno.EXDEV:
2378 raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory")
2380 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
2382 self
.mount_a
.run_python(rename_script
.format(src
=srcpath
, dst
=dstpath
), sudo
=True)
2385 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
2387 # verify trash dir is clean
2388 self
._wait
_for
_trash
_empty
()
def test_subvolume_pin_export(self):
    # pin a subvolume's export to rank 1 and verify the subtree lands there
    self.fs.set_max_mds(2)
    status = self.fs.wait_for_daemons()

    subvolume = self._generate_random_subvolume_name()
    self._fs_cmd("subvolume", "create", self.volname, subvolume)
    self._fs_cmd("subvolume", "pin", self.volname, subvolume, "export", "1")
    path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
    path = os.path.dirname(path) # get subvolume path

    self._get_subtrees(status=status, rank=1)
    self._wait_subtrees([(path, 1)], status=status)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)

    # verify trash dir is clean
    self._wait_for_trash_empty()
2409 ### authorize operations
2411 def test_authorize_deauthorize_legacy_subvolume(self
):
2412 subvolume
= self
._generate
_random
_subvolume
_name
()
2413 group
= self
._generate
_random
_group
_name
()
2416 guest_mount
= self
.mount_b
2417 guest_mount
.umount_wait()
2419 # emulate a old-fashioned subvolume in a custom group
2420 createpath
= os
.path
.join(".", "volumes", group
, subvolume
)
2421 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
2423 # add required xattrs to subvolume
2424 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
2425 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
2427 mount_path
= os
.path
.join("/", "volumes", group
, subvolume
)
2429 # authorize guest authID read-write access to subvolume
2430 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2431 "--group_name", group
, "--tenant_id", "tenant_id")
2433 # guest authID should exist
2434 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2435 self
.assertIn("client.{0}".format(authid
), existing_ids
)
2437 # configure credentials for guest client
2438 self
._configure
_guest
_auth
(guest_mount
, authid
, key
)
2440 # mount the subvolume, and write to it
2441 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2442 guest_mount
.write_n_mb("data.bin", 1)
2444 # authorize guest authID read access to subvolume
2445 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2446 "--group_name", group
, "--tenant_id", "tenant_id", "--access_level", "r")
2448 # guest client sees the change in access level to read only after a
2449 # remount of the subvolume.
2450 guest_mount
.umount_wait()
2451 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2453 # read existing content of the subvolume
2454 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
2455 # cannot write into read-only subvolume
2456 with self
.assertRaises(CommandFailedError
):
2457 guest_mount
.write_n_mb("rogue.bin", 1)
2460 guest_mount
.umount_wait()
2461 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid
,
2462 "--group_name", group
)
2463 # guest authID should no longer exist
2464 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2465 self
.assertNotIn("client.{0}".format(authid
), existing_ids
)
2466 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2467 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2469 def test_authorize_deauthorize_subvolume(self
):
2470 subvolume
= self
._generate
_random
_subvolume
_name
()
2471 group
= self
._generate
_random
_group
_name
()
2474 guest_mount
= self
.mount_b
2475 guest_mount
.umount_wait()
2478 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
, "--mode=777")
2480 # create subvolume in group
2481 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2482 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
2483 "--group_name", group
).rstrip()
2485 # authorize guest authID read-write access to subvolume
2486 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2487 "--group_name", group
, "--tenant_id", "tenant_id")
2489 # guest authID should exist
2490 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2491 self
.assertIn("client.{0}".format(authid
), existing_ids
)
2493 # configure credentials for guest client
2494 self
._configure
_guest
_auth
(guest_mount
, authid
, key
)
2496 # mount the subvolume, and write to it
2497 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2498 guest_mount
.write_n_mb("data.bin", 1)
2500 # authorize guest authID read access to subvolume
2501 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid
,
2502 "--group_name", group
, "--tenant_id", "tenant_id", "--access_level", "r")
2504 # guest client sees the change in access level to read only after a
2505 # remount of the subvolume.
2506 guest_mount
.umount_wait()
2507 guest_mount
.mount_wait(cephfs_mntpt
=mount_path
)
2509 # read existing content of the subvolume
2510 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
2511 # cannot write into read-only subvolume
2512 with self
.assertRaises(CommandFailedError
):
2513 guest_mount
.write_n_mb("rogue.bin", 1)
2516 guest_mount
.umount_wait()
2517 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid
,
2518 "--group_name", group
)
2519 # guest authID should no longer exist
2520 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
2521 self
.assertNotIn("client.{0}".format(authid
), existing_ids
)
2522 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2523 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2525 def test_multitenant_subvolumes(self
):
2527 That subvolume access can be restricted to a tenant.
2529 That metadata used to enforce tenant isolation of
2530 subvolumes is stored as a two-way mapping between auth
2531 IDs and subvolumes that they're authorized to access.
2533 subvolume
= self
._generate
_random
_subvolume
_name
()
2534 group
= self
._generate
_random
_group
_name
()
2536 guest_mount
= self
.mount_b
2538 # Guest clients belonging to different tenants, but using the same
2543 "tenant_id": "tenant1",
2547 "tenant_id": "tenant2",
2551 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2553 # create subvolume in group
2554 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2556 # Check that subvolume metadata file is created on subvolume creation.
2557 subvol_metadata_filename
= "_{0}:{1}.meta".format(group
, subvolume
)
2558 self
.assertIn(subvol_metadata_filename
, guest_mount
.ls("volumes"))
2560 # Authorize 'guestclient_1', using auth ID 'alice' and belonging to
2561 # 'tenant1', with 'rw' access to the volume.
2562 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2563 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2565 # Check that auth metadata file for auth ID 'alice', is
2566 # created on authorizing 'alice' access to the subvolume.
2567 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2568 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2570 # Verify that the auth metadata file stores the tenant ID that the
2571 # auth ID belongs to, the auth ID's authorized access levels
2572 # for different subvolumes, versioning details, etc.
2573 expected_auth_metadata
= {
2575 "compat_version": 6,
2577 "tenant_id": "tenant1",
2579 "{0}/{1}".format(group
,subvolume
): {
2581 "access_level": "rw"
2586 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2587 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
2588 del expected_auth_metadata
["version"]
2589 del auth_metadata
["version"]
2590 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
2592 # Verify that the subvolume metadata file stores info about auth IDs
2593 # and their access levels to the subvolume, versioning details, etc.
2594 expected_subvol_metadata
= {
2596 "compat_version": 1,
2600 "access_level": "rw"
2604 subvol_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(subvol_metadata_filename
)))
2606 self
.assertGreaterEqual(subvol_metadata
["version"], expected_subvol_metadata
["version"])
2607 del expected_subvol_metadata
["version"]
2608 del subvol_metadata
["version"]
2609 self
.assertEqual(expected_subvol_metadata
, subvol_metadata
)
2611 # Cannot authorize 'guestclient_2' to access the volume.
2612 # It uses auth ID 'alice', which has already been used by a
2613 # 'guestclient_1' belonging to an another tenant for accessing
2617 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_2
["auth_id"],
2618 "--group_name", group
, "--tenant_id", guestclient_2
["tenant_id"])
2619 except CommandFailedError
as ce
:
2620 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
2621 "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id")
2623 self
.fail("expected the 'fs subvolume authorize' command to fail")
2625 # Check that auth metadata file is cleaned up on removing
2626 # auth ID's only access to a volume.
2628 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
2629 "--group_name", group
)
2630 self
.assertNotIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2632 # Check that subvolume metadata file is cleaned up on subvolume deletion.
2633 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2634 self
.assertNotIn(subvol_metadata_filename
, guest_mount
.ls("volumes"))
2637 guest_mount
.umount_wait()
2638 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2640 def test_subvolume_authorized_list(self
):
2641 subvolume
= self
._generate
_random
_subvolume
_name
()
2642 group
= self
._generate
_random
_group
_name
()
2648 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2650 # create subvolume in group
2651 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2653 # authorize alice authID read-write access to subvolume
2654 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid1
,
2655 "--group_name", group
)
2656 # authorize guest1 authID read-write access to subvolume
2657 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid2
,
2658 "--group_name", group
)
2659 # authorize guest2 authID read access to subvolume
2660 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, authid3
,
2661 "--group_name", group
, "--access_level", "r")
2663 # list authorized-ids of the subvolume
2664 expected_auth_list
= [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}]
2665 auth_list
= json
.loads(self
._fs
_cmd
('subvolume', 'authorized_list', self
.volname
, subvolume
, "--group_name", group
))
2666 self
.assertCountEqual(expected_auth_list
, auth_list
)
2669 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid1
,
2670 "--group_name", group
)
2671 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid2
,
2672 "--group_name", group
)
2673 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, authid3
,
2674 "--group_name", group
)
2675 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2676 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2678 def test_authorize_auth_id_not_created_by_mgr_volumes(self
):
2680 If the auth_id already exists and is not created by mgr plugin,
2681 it's not allowed to authorize the auth-id by default.
2684 subvolume
= self
._generate
_random
_subvolume
_name
()
2685 group
= self
._generate
_random
_group
_name
()
2688 self
.fs
.mon_manager
.raw_cluster_cmd(
2689 "auth", "get-or-create", "client.guest1",
2698 "tenant_id": "tenant1",
2702 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2704 # create subvolume in group
2705 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2708 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2709 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2710 except CommandFailedError
as ce
:
2711 self
.assertEqual(ce
.exitstatus
, errno
.EPERM
,
2712 "Invalid error code returned on authorize of subvolume for auth_id created out of band")
2714 self
.fail("expected the 'fs subvolume authorize' command to fail")
2717 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2718 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2719 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2721 def test_authorize_allow_existing_id_option(self
):
2723 If the auth_id already exists and is not created by mgr volumes,
2724 it's not allowed to authorize the auth-id by default but is
2725 allowed with option allow_existing_id.
2728 subvolume
= self
._generate
_random
_subvolume
_name
()
2729 group
= self
._generate
_random
_group
_name
()
2732 self
.fs
.mon_manager
.raw_cluster_cmd(
2733 "auth", "get-or-create", "client.guest1",
2742 "tenant_id": "tenant1",
2746 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2748 # create subvolume in group
2749 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2751 # Cannot authorize 'guestclient_1' to access the volume by default,
2752 # which already exists and not created by mgr volumes but is allowed
2753 # with option 'allow_existing_id'.
2754 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2755 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"], "--allow-existing-id")
2758 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
,
2759 "--group_name", group
)
2760 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2761 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2762 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2764 def test_deauthorize_auth_id_after_out_of_band_update(self
):
2766 If the auth_id authorized by mgr/volumes plugin is updated
2767 out of band, the auth_id should not be deleted after a
2768 deauthorize. It should only remove caps associated with it.
2771 subvolume
= self
._generate
_random
_subvolume
_name
()
2772 group
= self
._generate
_random
_group
_name
()
2777 "tenant_id": "tenant1",
2781 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2783 # create subvolume in group
2784 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2786 # Authorize 'guestclient_1' to access the subvolume.
2787 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2788 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2790 subvol_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
,
2791 "--group_name", group
).rstrip()
2793 # Update caps for guestclient_1 out of band
2794 out
= self
.fs
.mon_manager
.raw_cluster_cmd(
2795 "auth", "caps", "client.guest1",
2796 "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group
, subvol_path
),
2797 "osd", "allow rw pool=cephfs_data",
2802 # Deauthorize guestclient_1
2803 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
2805 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
2806 # guestclient_1. The mgr and mds caps should be present which was updated out of band.
2807 out
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
2809 self
.assertEqual("client.guest1", out
[0]["entity"])
2810 self
.assertEqual("allow rw path=/volumes/{0}".format(group
), out
[0]["caps"]["mds"])
2811 self
.assertEqual("allow *", out
[0]["caps"]["mgr"])
2812 self
.assertNotIn("osd", out
[0]["caps"])
2815 out
= self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2816 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2817 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2819 def test_recover_auth_metadata_during_authorize(self
):
2821 That auth metadata manager can recover from partial auth updates using
2822 metadata files, which store auth info and its update status info. This
2823 test validates the recovery during authorize.
2826 guest_mount
= self
.mount_b
2828 subvolume
= self
._generate
_random
_subvolume
_name
()
2829 group
= self
._generate
_random
_group
_name
()
2834 "tenant_id": "tenant1",
2838 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2840 # create subvolume in group
2841 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
2843 # Authorize 'guestclient_1' to access the subvolume.
2844 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2845 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2847 # Check that auth metadata file for auth ID 'guest1', is
2848 # created on authorizing 'guest1' access to the subvolume.
2849 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2850 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2851 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2853 # Induce partial auth update state by modifying the auth metadata file,
2854 # and then run authorize again.
2855 guest_mount
.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
2857 # Authorize 'guestclient_1' to access the subvolume.
2858 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume
, guestclient_1
["auth_id"],
2859 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2861 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2862 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
2865 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume
, auth_id
, "--group_name", group
)
2866 guest_mount
.umount_wait()
2867 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2868 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
2869 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2871 def test_recover_auth_metadata_during_deauthorize(self
):
2873 That auth metadata manager can recover from partial auth updates using
2874 metadata files, which store auth info and its update status info. This
2875 test validates the recovery during deauthorize.
2878 guest_mount
= self
.mount_b
2880 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
2881 group
= self
._generate
_random
_group
_name
()
2884 "auth_id": "guest1",
2885 "tenant_id": "tenant1",
2889 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2891 # create subvolumes in group
2892 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
2893 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
2895 # Authorize 'guestclient_1' to access the subvolume1.
2896 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
2897 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2899 # Check that auth metadata file for auth ID 'guest1', is
2900 # created on authorizing 'guest1' access to the subvolume1.
2901 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2902 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2903 expected_auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2905 # Authorize 'guestclient_1' to access the subvolume2.
2906 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2907 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2909 # Induce partial auth update state by modifying the auth metadata file,
2910 # and then run de-authorize.
2911 guest_mount
.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
2913 # Deauthorize 'guestclient_1' to access the subvolume2.
2914 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2915 "--group_name", group
)
2917 auth_metadata_content
= self
._auth
_metadata
_get
(self
.mount_a
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2918 self
.assertEqual(auth_metadata_content
, expected_auth_metadata_content
)
2921 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, "guest1", "--group_name", group
)
2922 guest_mount
.umount_wait()
2923 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2924 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
2925 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
2926 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
2928 def test_update_old_style_auth_metadata_to_new_during_authorize(self
):
2930 CephVolumeClient stores the subvolume data in auth metadata file with
2931 'volumes' key as there was no subvolume namespace. It doesn't makes sense
2932 with mgr/volumes. This test validates the transparent update of 'volumes'
2933 key to 'subvolumes' key in auth metadata file during authorize.
2936 guest_mount
= self
.mount_b
2938 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
2939 group
= self
._generate
_random
_group
_name
()
2944 "tenant_id": "tenant1",
2948 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
2950 # create subvolumes in group
2951 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
2952 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
2954 # Authorize 'guestclient_1' to access the subvolume1.
2955 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
2956 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2958 # Check that auth metadata file for auth ID 'guest1', is
2959 # created on authorizing 'guest1' access to the subvolume1.
2960 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
2961 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
2963 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
2964 guest_mount
.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
2966 # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
2967 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
2968 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
2970 expected_auth_metadata
= {
2972 "compat_version": 6,
2974 "tenant_id": "tenant1",
2976 "{0}/{1}".format(group
,subvolume1
): {
2978 "access_level": "rw"
2980 "{0}/{1}".format(group
,subvolume2
): {
2982 "access_level": "rw"
2987 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
2989 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
2990 del expected_auth_metadata
["version"]
2991 del auth_metadata
["version"]
2992 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
2995 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
2996 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
2997 guest_mount
.umount_wait()
2998 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
2999 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3000 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3001 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3003 def test_update_old_style_auth_metadata_to_new_during_deauthorize(self
):
3005 CephVolumeClient stores the subvolume data in auth metadata file with
3006 'volumes' key as there was no subvolume namespace. It doesn't makes sense
3007 with mgr/volumes. This test validates the transparent update of 'volumes'
3008 key to 'subvolumes' key in auth metadata file during deauthorize.
3011 guest_mount
= self
.mount_b
3013 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
3014 group
= self
._generate
_random
_group
_name
()
3019 "tenant_id": "tenant1",
3023 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3025 # create subvolumes in group
3026 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3027 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--group_name", group
)
3029 # Authorize 'guestclient_1' to access the subvolume1.
3030 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume1
, guestclient_1
["auth_id"],
3031 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3033 # Authorize 'guestclient_1' to access the subvolume2.
3034 self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolume2
, guestclient_1
["auth_id"],
3035 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3037 # Check that auth metadata file for auth ID 'guest1', is created.
3038 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
3039 self
.assertIn(auth_metadata_filename
, guest_mount
.ls("volumes"))
3041 # Replace 'subvolumes' to 'volumes', old style auth-metadata file
3042 guest_mount
.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename
)], sudo
=True)
3044 # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
3045 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume2
, auth_id
, "--group_name", group
)
3047 expected_auth_metadata
= {
3049 "compat_version": 6,
3051 "tenant_id": "tenant1",
3053 "{0}/{1}".format(group
,subvolume1
): {
3055 "access_level": "rw"
3060 auth_metadata
= self
._auth
_metadata
_get
(guest_mount
.read_file("volumes/{0}".format(auth_metadata_filename
)))
3062 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
3063 del expected_auth_metadata
["version"]
3064 del auth_metadata
["version"]
3065 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
3068 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolume1
, auth_id
, "--group_name", group
)
3069 guest_mount
.umount_wait()
3070 self
.fs
.mon_manager
.raw_cluster_cmd("auth", "rm", "client.guest1")
3071 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3072 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
, "--group_name", group
)
3073 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3075 def test_subvolume_evict_client(self
):
3077 That a subvolume client can be evicted based on the auth ID
3080 subvolumes
= self
._generate
_random
_subvolume
_name
(2)
3081 group
= self
._generate
_random
_group
_name
()
3084 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3086 # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares.
3087 for i
in range(0, 2):
3088 self
.mounts
[i
].umount_wait()
3089 guest_mounts
= (self
.mounts
[0], self
.mounts
[1])
3093 "tenant_id": "tenant1",
3096 # Create two subvolumes. Authorize 'guest' auth ID to mount the two
3097 # subvolumes. Mount the two subvolumes. Write data to the volumes.
3100 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolumes
[i
], "--group_name", group
, "--mode=777")
3102 # authorize guest authID read-write access to subvolume
3103 key
= self
._fs
_cmd
("subvolume", "authorize", self
.volname
, subvolumes
[i
], guestclient_1
["auth_id"],
3104 "--group_name", group
, "--tenant_id", guestclient_1
["tenant_id"])
3106 mount_path
= self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolumes
[i
],
3107 "--group_name", group
).rstrip()
3108 # configure credentials for guest client
3109 self
._configure
_guest
_auth
(guest_mounts
[i
], auth_id
, key
)
3111 # mount the subvolume, and write to it
3112 guest_mounts
[i
].mount_wait(cephfs_mntpt
=mount_path
)
3113 guest_mounts
[i
].write_n_mb("data.bin", 1)
3115 # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
3117 self
._fs
_cmd
("subvolume", "evict", self
.volname
, subvolumes
[0], auth_id
, "--group_name", group
)
3119 # Evicted guest client, guest_mounts[0], should not be able to do
3120 # anymore metadata ops. It should start failing all operations
3121 # when it sees that its own address is in the blocklist.
3123 guest_mounts
[0].write_n_mb("rogue.bin", 1)
3124 except CommandFailedError
:
3127 raise RuntimeError("post-eviction write should have failed!")
3129 # The blocklisted guest client should now be unmountable
3130 guest_mounts
[0].umount_wait()
3132 # Guest client, guest_mounts[1], using the same auth ID 'guest', but
3133 # has mounted the other volume, should be able to use its volume
3135 guest_mounts
[1].write_n_mb("data.bin.1", 1)
3138 guest_mounts
[1].umount_wait()
3140 self
._fs
_cmd
("subvolume", "deauthorize", self
.volname
, subvolumes
[i
], auth_id
, "--group_name", group
)
3141 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolumes
[i
], "--group_name", group
)
3142 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3144 def test_subvolume_pin_random(self
):
3145 self
.fs
.set_max_mds(2)
3146 self
.fs
.wait_for_daemons()
3147 self
.config_set('mds', 'mds_export_ephemeral_random', True)
3149 subvolume
= self
._generate
_random
_subvolume
_name
()
3150 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
3151 self
._fs
_cmd
("subvolume", "pin", self
.volname
, subvolume
, "random", ".01")
3155 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3157 # verify trash dir is clean
3158 self
._wait
_for
_trash
_empty
()
3160 def test_subvolume_resize_fail_invalid_size(self
):
3162 That a subvolume cannot be resized to an invalid size and the quota did not change
3165 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3167 subvolname
= self
._generate
_random
_subvolume
_name
()
3168 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3170 # make sure it exists
3171 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3172 self
.assertNotEqual(subvolpath
, None)
3174 # try to resize the subvolume with an invalid size -10
3177 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3178 except CommandFailedError
as ce
:
3179 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3181 self
.fail("expected the 'fs subvolume resize' command to fail")
3183 # verify the quota did not change
3184 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3185 self
.assertEqual(size
, osize
)
3188 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3190 # verify trash dir is clean
3191 self
._wait
_for
_trash
_empty
()
3193 def test_subvolume_resize_fail_zero_size(self
):
3195 That a subvolume cannot be resized to a zero size and the quota did not change
3198 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3200 subvolname
= self
._generate
_random
_subvolume
_name
()
3201 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3203 # make sure it exists
3204 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3205 self
.assertNotEqual(subvolpath
, None)
3207 # try to resize the subvolume with size 0
3210 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3211 except CommandFailedError
as ce
:
3212 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3214 self
.fail("expected the 'fs subvolume resize' command to fail")
3216 # verify the quota did not change
3217 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3218 self
.assertEqual(size
, osize
)
3221 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3223 # verify trash dir is clean
3224 self
._wait
_for
_trash
_empty
()
3226 def test_subvolume_resize_quota_lt_used_size(self
):
3228 That a subvolume can be resized to a size smaller than the current used size
3229 and the resulting quota matches the expected size.
3232 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
3234 subvolname
= self
._generate
_random
_subvolume
_name
()
3235 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3237 # make sure it exists
3238 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3239 self
.assertNotEqual(subvolpath
, None)
3241 # create one file of 10MB
3242 file_size
=self
.DEFAULT_FILE_SIZE
*10
3244 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3247 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+1)
3248 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3250 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
3251 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
3252 if isinstance(self
.mount_a
, FuseMount
):
3253 # kclient dir does not have size==rbytes
3254 self
.assertEqual(usedsize
, susedsize
)
3256 # shrink the subvolume
3257 nsize
= usedsize
// 2
3259 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3260 except CommandFailedError
:
3261 self
.fail("expected the 'fs subvolume resize' command to succeed")
3264 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3265 self
.assertEqual(size
, nsize
)
3268 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3270 # verify trash dir is clean
3271 self
._wait
_for
_trash
_empty
()
3273 def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self
):
3275 That a subvolume cannot be resized to a size smaller than the current used size
3276 when --no_shrink is given and the quota did not change.
3279 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
3281 subvolname
= self
._generate
_random
_subvolume
_name
()
3282 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3284 # make sure it exists
3285 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3286 self
.assertNotEqual(subvolpath
, None)
3288 # create one file of 10MB
3289 file_size
=self
.DEFAULT_FILE_SIZE
*10
3291 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3294 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+2)
3295 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3297 usedsize
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.dir.rbytes"))
3298 susedsize
= int(self
.mount_a
.run_shell(['stat', '-c' '%s', subvolpath
]).stdout
.getvalue().strip())
3299 if isinstance(self
.mount_a
, FuseMount
):
3300 # kclient dir does not have size==rbytes
3301 self
.assertEqual(usedsize
, susedsize
)
3303 # shrink the subvolume
3304 nsize
= usedsize
// 2
3306 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
), "--no_shrink")
3307 except CommandFailedError
as ce
:
3308 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on resize of subvolume with invalid size")
3310 self
.fail("expected the 'fs subvolume resize' command to fail")
3312 # verify the quota did not change
3313 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3314 self
.assertEqual(size
, osize
)
3317 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3319 # verify trash dir is clean
3320 self
._wait
_for
_trash
_empty
()
3322 def test_subvolume_resize_expand_on_full_subvolume(self
):
3324 That the subvolume can be expanded from a full subvolume and future writes succeed.
3327 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*10
3328 # create subvolume of quota 10MB and make sure it exists
3329 subvolname
= self
._generate
_random
_subvolume
_name
()
3330 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
), "--mode=777")
3331 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3332 self
.assertNotEqual(subvolpath
, None)
3334 # create one file of size 10MB and write
3335 file_size
=self
.DEFAULT_FILE_SIZE
*10
3337 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3340 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+3)
3341 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3343 # create a file of size 5MB and try write more
3344 file_size
=file_size
// 2
3346 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3349 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+4)
3351 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3352 except CommandFailedError
:
3353 # Not able to write. So expand the subvolume more and try writing the 5MB file again
3355 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3357 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3358 except CommandFailedError
:
3359 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3360 "to succeed".format(subvolname
, number_of_files
, file_size
))
3362 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB"
3363 "to fail".format(subvolname
, number_of_files
, file_size
))
3366 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3368 # verify trash dir is clean
3369 self
._wait
_for
_trash
_empty
()
3371 def test_subvolume_resize_infinite_size(self
):
3373 That a subvolume can be resized to an infinite size by unsetting its quota.
3377 subvolname
= self
._generate
_random
_subvolume
_name
()
3378 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
3379 str(self
.DEFAULT_FILE_SIZE
*1024*1024))
3381 # make sure it exists
3382 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3383 self
.assertNotEqual(subvolpath
, None)
3386 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
3388 # verify that the quota is None
3389 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
3390 self
.assertEqual(size
, None)
3393 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3395 # verify trash dir is clean
3396 self
._wait
_for
_trash
_empty
()
3398 def test_subvolume_resize_infinite_size_future_writes(self
):
3400 That a subvolume can be resized to an infinite size and the future writes succeed.
3404 subvolname
= self
._generate
_random
_subvolume
_name
()
3405 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size",
3406 str(self
.DEFAULT_FILE_SIZE
*1024*1024*5), "--mode=777")
3408 # make sure it exists
3409 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3410 self
.assertNotEqual(subvolpath
, None)
3413 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, "inf")
3415 # verify that the quota is None
3416 size
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes")
3417 self
.assertEqual(size
, None)
3419 # create one file of 10MB and try to write
3420 file_size
=self
.DEFAULT_FILE_SIZE
*10
3422 log
.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname
,
3425 filename
= "{0}.{1}".format(TestVolumes
.TEST_FILE_NAME_PREFIX
, self
.DEFAULT_NUMBER_OF_FILES
+5)
3428 self
.mount_a
.write_n_mb(os
.path
.join(subvolpath
, filename
), file_size
)
3429 except CommandFailedError
:
3430 self
.fail("expected filling subvolume {0} with {1} file of size {2}MB "
3431 "to succeed".format(subvolname
, number_of_files
, file_size
))
3434 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3436 # verify trash dir is clean
3437 self
._wait
_for
_trash
_empty
()
3439 def test_subvolume_rm_force(self
):
3440 # test removing non-existing subvolume with --force
3441 subvolume
= self
._generate
_random
_subvolume
_name
()
3443 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
3444 except CommandFailedError
:
3445 self
.fail("expected the 'fs subvolume rm --force' command to succeed")
3447 def test_subvolume_exists_with_subvolumegroup_and_subvolume(self
):
3448 """Test the presence of any subvolume by specifying the name of subvolumegroup"""
3450 group
= self
._generate
_random
_group
_name
()
3451 subvolume1
= self
._generate
_random
_subvolume
_name
()
3452 # create subvolumegroup
3453 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3454 # create subvolume in group
3455 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--group_name", group
)
3456 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3457 self
.assertEqual(ret
.strip('\n'), "subvolume exists")
3458 # delete subvolume in group
3459 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
, "--group_name", group
)
3460 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3461 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3462 # delete subvolumegroup
3463 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3465 def test_subvolume_exists_with_subvolumegroup_and_no_subvolume(self
):
3466 """Test the presence of any subvolume specifying the name
3467 of subvolumegroup and no subvolumes"""
3469 group
= self
._generate
_random
_group
_name
()
3470 # create subvolumegroup
3471 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3472 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
, "--group_name", group
)
3473 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3474 # delete subvolumegroup
3475 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3477 def test_subvolume_exists_without_subvolumegroup_and_with_subvolume(self
):
3478 """Test the presence of any subvolume without specifying the name
3479 of subvolumegroup"""
3481 subvolume1
= self
._generate
_random
_subvolume
_name
()
3483 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
)
3484 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3485 self
.assertEqual(ret
.strip('\n'), "subvolume exists")
3487 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
3488 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3489 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3491 def test_subvolume_exists_without_subvolumegroup_and_without_subvolume(self
):
3492 """Test the presence of any subvolume without any subvolumegroup
3493 and without any subvolume"""
3495 ret
= self
._fs
_cmd
("subvolume", "exist", self
.volname
)
3496 self
.assertEqual(ret
.strip('\n'), "no subvolume exists")
3498 def test_subvolume_shrink(self
):
3500 That a subvolume can be shrinked in size and its quota matches the expected size.
3504 subvolname
= self
._generate
_random
_subvolume
_name
()
3505 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024
3506 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--size", str(osize
))
3508 # make sure it exists
3509 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolname
)
3510 self
.assertNotEqual(subvolpath
, None)
3512 # shrink the subvolume
3514 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolname
, str(nsize
))
3517 size
= int(self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_bytes"))
3518 self
.assertEqual(size
, nsize
)
3521 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
)
3523 # verify trash dir is clean
3524 self
._wait
_for
_trash
_empty
()
3526 def test_subvolume_retain_snapshot_rm_idempotency(self
):
3528 ensure subvolume deletion of a subvolume which is already deleted with retain snapshots option passes.
3529 After subvolume deletion with retain snapshots, the subvolume exists until the trash directory (resides inside subvolume)
3530 is cleaned up. The subvolume deletion issued while the trash directory is not empty, should pass and should
3531 not error out with EAGAIN.
3533 subvolume
= self
._generate
_random
_subvolume
_name
()
3534 snapshot
= self
._generate
_random
_snapshot
_name
()
3537 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
3540 self
._do
_subvolume
_io
(subvolume
, number_of_files
=256)
3542 # snapshot subvolume
3543 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
3545 # remove with snapshot retention
3546 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
3548 # remove snapshots (removes retained volume)
3549 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
3551 # remove subvolume (check idempotency)
3553 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
3554 except CommandFailedError
as ce
:
3555 if ce
.exitstatus
!= errno
.ENOENT
:
3556 self
.fail(f
"expected subvolume rm to pass with error: {os.strerror(ce.exitstatus)}")
3558 # verify trash dir is clean
3559 self
._wait
_for
_trash
_empty
()
3562 def test_subvolume_user_metadata_set(self
):
3563 subvolname
= self
._generate
_random
_subvolume
_name
()
3564 group
= self
._generate
_random
_group
_name
()
3567 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3569 # create subvolume in group.
3570 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3572 # set metadata for subvolume.
3576 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3577 except CommandFailedError
:
3578 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3580 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3581 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3583 # verify trash dir is clean.
3584 self
._wait
_for
_trash
_empty
()
3586 def test_subvolume_user_metadata_set_idempotence(self
):
3587 subvolname
= self
._generate
_random
_subvolume
_name
()
3588 group
= self
._generate
_random
_group
_name
()
3591 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3593 # create subvolume in group.
3594 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3596 # set metadata for subvolume.
3600 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3601 except CommandFailedError
:
3602 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3604 # set same metadata again for subvolume.
3606 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3607 except CommandFailedError
:
3608 self
.fail("expected the 'fs subvolume metadata set' command to succeed because it is idempotent operation")
3610 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3611 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3613 # verify trash dir is clean.
3614 self
._wait
_for
_trash
_empty
()
3616 def test_subvolume_user_metadata_get(self
):
3617 subvolname
= self
._generate
_random
_subvolume
_name
()
3618 group
= self
._generate
_random
_group
_name
()
3621 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3623 # create subvolume in group.
3624 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3626 # set metadata for subvolume.
3629 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3631 # get value for specified key.
3633 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3634 except CommandFailedError
:
3635 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3637 # remove '\n' from returned value.
3638 ret
= ret
.strip('\n')
3640 # match received value with expected value.
3641 self
.assertEqual(value
, ret
)
3643 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3644 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3646 # verify trash dir is clean.
3647 self
._wait
_for
_trash
_empty
()
3649 def test_subvolume_user_metadata_get_for_nonexisting_key(self
):
3650 subvolname
= self
._generate
_random
_subvolume
_name
()
3651 group
= self
._generate
_random
_group
_name
()
3654 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3656 # create subvolume in group.
3657 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3659 # set metadata for subvolume.
3662 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3664 # try to get value for nonexisting key
3665 # Expecting ENOENT exit status because key does not exist
3667 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
3668 except CommandFailedError
as e
:
3669 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3671 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3673 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3674 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3676 # verify trash dir is clean.
3677 self
._wait
_for
_trash
_empty
()
3679 def test_subvolume_user_metadata_get_for_nonexisting_section(self
):
3680 subvolname
= self
._generate
_random
_subvolume
_name
()
3681 group
= self
._generate
_random
_group
_name
()
3684 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3686 # create subvolume in group.
3687 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3689 # try to get value for nonexisting key (as section does not exist)
3690 # Expecting ENOENT exit status because key does not exist
3692 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key", "--group_name", group
)
3693 except CommandFailedError
as e
:
3694 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3696 self
.fail("Expected ENOENT because section does not exist")
3698 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3699 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3701 # verify trash dir is clean.
3702 self
._wait
_for
_trash
_empty
()
3704 def test_subvolume_user_metadata_update(self
):
3705 subvolname
= self
._generate
_random
_subvolume
_name
()
3706 group
= self
._generate
_random
_group
_name
()
3709 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3711 # create subvolume in group.
3712 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3714 # set metadata for subvolume.
3717 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3719 # update metadata against key.
3720 new_value
= "new_value"
3721 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, new_value
, "--group_name", group
)
3723 # get metadata for specified key of subvolume.
3725 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3726 except CommandFailedError
:
3727 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3729 # remove '\n' from returned value.
3730 ret
= ret
.strip('\n')
3732 # match received value with expected value.
3733 self
.assertEqual(new_value
, ret
)
3735 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3736 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3738 # verify trash dir is clean.
3739 self
._wait
_for
_trash
_empty
()
3741 def test_subvolume_user_metadata_list(self
):
3742 subvolname
= self
._generate
_random
_subvolume
_name
()
3743 group
= self
._generate
_random
_group
_name
()
3746 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3748 # create subvolume in group.
3749 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3751 # set metadata for subvolume.
3752 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
3754 for k
, v
in input_metadata_dict
.items():
3755 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
3759 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
3760 except CommandFailedError
:
3761 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
3763 ret_dict
= json
.loads(ret
)
3765 # compare output with expected output
3766 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
3768 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3769 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3771 # verify trash dir is clean.
3772 self
._wait
_for
_trash
_empty
()
3774 def test_subvolume_user_metadata_list_if_no_metadata_set(self
):
3775 subvolname
= self
._generate
_random
_subvolume
_name
()
3776 group
= self
._generate
_random
_group
_name
()
3779 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3781 # create subvolume in group.
3782 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3786 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
3787 except CommandFailedError
:
3788 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
3790 # remove '\n' from returned value.
3791 ret
= ret
.strip('\n')
3793 # compare output with expected output
3794 # expecting empty json/dictionary
3795 self
.assertEqual(ret
, "{}")
3797 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3798 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3800 # verify trash dir is clean.
3801 self
._wait
_for
_trash
_empty
()
3803 def test_subvolume_user_metadata_remove(self
):
3804 subvolname
= self
._generate
_random
_subvolume
_name
()
3805 group
= self
._generate
_random
_group
_name
()
3808 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3810 # create subvolume in group.
3811 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3813 # set metadata for subvolume.
3816 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3818 # remove metadata against specified key.
3820 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
3821 except CommandFailedError
:
3822 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
3824 # confirm key is removed by again fetching metadata
3826 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3827 except CommandFailedError
as e
:
3828 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3830 self
.fail("Expected ENOENT because key does not exist")
3832 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3833 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3835 # verify trash dir is clean.
3836 self
._wait
_for
_trash
_empty
()
3838 def test_subvolume_user_metadata_remove_for_nonexisting_key(self
):
3839 subvolname
= self
._generate
_random
_subvolume
_name
()
3840 group
= self
._generate
_random
_group
_name
()
3843 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3845 # create subvolume in group.
3846 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3848 # set metadata for subvolume.
3851 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3853 # try to remove value for nonexisting key
3854 # Expecting ENOENT exit status because key does not exist
3856 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_nonexist", "--group_name", group
)
3857 except CommandFailedError
as e
:
3858 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3860 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
3862 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3863 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3865 # verify trash dir is clean.
3866 self
._wait
_for
_trash
_empty
()
3868 def test_subvolume_user_metadata_remove_for_nonexisting_section(self
):
3869 subvolname
= self
._generate
_random
_subvolume
_name
()
3870 group
= self
._generate
_random
_group
_name
()
3873 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3875 # create subvolume in group.
3876 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3878 # try to remove value for nonexisting key (as section does not exist)
3879 # Expecting ENOENT exit status because key does not exist
3881 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key", "--group_name", group
)
3882 except CommandFailedError
as e
:
3883 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3885 self
.fail("Expected ENOENT because section does not exist")
3887 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3888 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3890 # verify trash dir is clean.
3891 self
._wait
_for
_trash
_empty
()
3893 def test_subvolume_user_metadata_remove_force(self
):
3894 subvolname
= self
._generate
_random
_subvolume
_name
()
3895 group
= self
._generate
_random
_group
_name
()
3898 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3900 # create subvolume in group.
3901 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3903 # set metadata for subvolume.
3906 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3908 # remove metadata against specified key with --force option.
3910 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
, "--force")
3911 except CommandFailedError
:
3912 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
3914 # confirm key is removed by again fetching metadata
3916 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3917 except CommandFailedError
as e
:
3918 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3920 self
.fail("Expected ENOENT because key does not exist")
3922 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3923 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3925 # verify trash dir is clean.
3926 self
._wait
_for
_trash
_empty
()
3928 def test_subvolume_user_metadata_remove_force_for_nonexisting_key(self
):
3929 subvolname
= self
._generate
_random
_subvolume
_name
()
3930 group
= self
._generate
_random
_group
_name
()
3933 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
3935 # create subvolume in group.
3936 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, "--group_name", group
)
3938 # set metadata for subvolume.
3941 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3943 # remove metadata against specified key.
3945 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
)
3946 except CommandFailedError
:
3947 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
3949 # confirm key is removed by again fetching metadata
3951 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3952 except CommandFailedError
as e
:
3953 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
3955 self
.fail("Expected ENOENT because key does not exist")
3957 # again remove metadata against already removed key with --force option.
3959 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, key
, "--group_name", group
, "--force")
3960 except CommandFailedError
:
3961 self
.fail("expected the 'fs subvolume metadata rm' (with --force) command to succeed")
3963 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3964 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
3966 # verify trash dir is clean.
3967 self
._wait
_for
_trash
_empty
()
3969 def test_subvolume_user_metadata_set_and_get_for_legacy_subvolume(self
):
3970 subvolname
= self
._generate
_random
_subvolume
_name
()
3971 group
= self
._generate
_random
_group
_name
()
3973 # emulate a old-fashioned subvolume in a custom group
3974 createpath
= os
.path
.join(".", "volumes", group
, subvolname
)
3975 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
3977 # set metadata for subvolume.
3981 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, key
, value
, "--group_name", group
)
3982 except CommandFailedError
:
3983 self
.fail("expected the 'fs subvolume metadata set' command to succeed")
3985 # get value for specified key.
3987 ret
= self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, key
, "--group_name", group
)
3988 except CommandFailedError
:
3989 self
.fail("expected the 'fs subvolume metadata get' command to succeed")
3991 # remove '\n' from returned value.
3992 ret
= ret
.strip('\n')
3994 # match received value with expected value.
3995 self
.assertEqual(value
, ret
)
3997 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
3998 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4000 # verify trash dir is clean.
4001 self
._wait
_for
_trash
_empty
()
4003 def test_subvolume_user_metadata_list_and_remove_for_legacy_subvolume(self
):
4004 subvolname
= self
._generate
_random
_subvolume
_name
()
4005 group
= self
._generate
_random
_group
_name
()
4007 # emulate a old-fashioned subvolume in a custom group
4008 createpath
= os
.path
.join(".", "volumes", group
, subvolname
)
4009 self
.mount_a
.run_shell(['mkdir', '-p', createpath
], sudo
=True)
4011 # set metadata for subvolume.
4012 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
4014 for k
, v
in input_metadata_dict
.items():
4015 self
._fs
_cmd
("subvolume", "metadata", "set", self
.volname
, subvolname
, k
, v
, "--group_name", group
)
4019 ret
= self
._fs
_cmd
("subvolume", "metadata", "ls", self
.volname
, subvolname
, "--group_name", group
)
4020 except CommandFailedError
:
4021 self
.fail("expected the 'fs subvolume metadata ls' command to succeed")
4023 ret_dict
= json
.loads(ret
)
4025 # compare output with expected output
4026 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
4028 # remove metadata against specified key.
4030 self
._fs
_cmd
("subvolume", "metadata", "rm", self
.volname
, subvolname
, "key_1", "--group_name", group
)
4031 except CommandFailedError
:
4032 self
.fail("expected the 'fs subvolume metadata rm' command to succeed")
4034 # confirm key is removed by again fetching metadata
4036 self
._fs
_cmd
("subvolume", "metadata", "get", self
.volname
, subvolname
, "key_1", "--group_name", group
)
4037 except CommandFailedError
as e
:
4038 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
4040 self
.fail("Expected ENOENT because key_1 does not exist")
4042 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4043 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4045 # verify trash dir is clean.
4046 self
._wait
_for
_trash
_empty
()
class TestSubvolumeGroupSnapshots(TestVolumesHelper):
    """Tests for FS subvolume group snapshot operations."""

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_nonexistent_subvolume_group_snapshot_rm(self):
        """Removing a group snapshot twice must fail with ENOENT on the second try."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # removing the same snapshot again must surface ENOENT
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)
        except CommandFailedError as ce:
            if ce.exitstatus != errno.ENOENT:
                raise
        else:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail")

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_create_and_rm(self):
        """Basic create/remove round trip of a subvolume group snapshot."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_idempotence(self):
        """Creating a group snapshot under an existing name must be idempotent."""
        subvolume = self._generate_random_subvolume_name()
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolume in group
        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)

        # snapshot group
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # try creating snapshot w/ same snapshot name -- should be idempotent
        self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        # remove snapshot
        self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot)

        # remove subvolume
        self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)

        # verify trash dir is clean
        self._wait_for_trash_empty()

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_ls(self):
        # tests the 'fs subvolumegroup snapshot ls' command

        # create group
        group = self._generate_random_group_name()
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # create subvolumegroup snapshots
        snapshots = self._generate_random_snapshot_name(3)
        for snapshot in snapshots:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)

        subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group))
        if len(subvolgrpsnapshotls) == 0:
            raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created subvolume group snapshots")
        else:
            # the listed names must match exactly what was created (order-independent)
            snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls]
            if collections.Counter(snapshotnames) != collections.Counter(snapshots):
                raise RuntimeError("Error creating or listing subvolume group snapshots")

    @unittest.skip("skipping subvolumegroup snapshot tests")
    def test_subvolume_group_snapshot_rm_force(self):
        # test removing non-existing subvolume group snapshot with --force
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # --force makes removal of a missing snapshot a no-op rather than an error
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force")
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed")

    def test_subvolume_group_snapshot_unsupported_status(self):
        """Group snapshot create is currently unsupported and must fail with ENOSYS."""
        group = self._generate_random_group_name()
        snapshot = self._generate_random_snapshot_name()

        # create group
        self._fs_cmd("subvolumegroup", "create", self.volname, group)

        # snapshot create on a group must report ENOSYS while the feature is disabled
        try:
            self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot)
        except CommandFailedError as ce:
            self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create")
        else:
            self.fail("expected subvolumegroup snapshot create command to fail")

        # remove group
        self._fs_cmd("subvolumegroup", "rm", self.volname, group)
4196 class TestSubvolumeSnapshots(TestVolumesHelper
):
4197 """Tests for FS subvolume snapshot operations."""
4198 def test_nonexistent_subvolume_snapshot_rm(self
):
4199 subvolume
= self
._generate
_random
_subvolume
_name
()
4200 snapshot
= self
._generate
_random
_snapshot
_name
()
4203 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4205 # snapshot subvolume
4206 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4209 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4211 # remove snapshot again
4213 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4214 except CommandFailedError
as ce
:
4215 if ce
.exitstatus
!= errno
.ENOENT
:
4218 raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail")
4221 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4223 # verify trash dir is clean
4224 self
._wait
_for
_trash
_empty
()
4226 def test_subvolume_snapshot_create_and_rm(self
):
4227 subvolume
= self
._generate
_random
_subvolume
_name
()
4228 snapshot
= self
._generate
_random
_snapshot
_name
()
4231 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4233 # snapshot subvolume
4234 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4237 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4240 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4242 # verify trash dir is clean
4243 self
._wait
_for
_trash
_empty
()
4245 def test_subvolume_snapshot_create_idempotence(self
):
4246 subvolume
= self
._generate
_random
_subvolume
_name
()
4247 snapshot
= self
._generate
_random
_snapshot
_name
()
4250 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4252 # snapshot subvolume
4253 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4255 # try creating w/ same subvolume snapshot name -- should be idempotent
4256 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4259 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4262 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4264 # verify trash dir is clean
4265 self
._wait
_for
_trash
_empty
()
4267 def test_subvolume_snapshot_info(self
):
4270 tests the 'fs subvolume snapshot info' command
4273 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4275 subvolume
= self
._generate
_random
_subvolume
_name
()
4276 snapshot
, snap_missing
= self
._generate
_random
_snapshot
_name
(2)
4279 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4282 self
._do
_subvolume
_io
(subvolume
, number_of_files
=1)
4284 # snapshot subvolume
4285 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4287 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
4289 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4290 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4292 # snapshot info for non-existent snapshot
4294 self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snap_missing
)
4295 except CommandFailedError
as ce
:
4296 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot info of non-existent snapshot")
4298 self
.fail("expected snapshot info of non-existent snapshot to fail")
4301 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4304 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4306 # verify trash dir is clean
4307 self
._wait
_for
_trash
_empty
()
4309 def test_subvolume_snapshot_in_group(self
):
4310 subvolume
= self
._generate
_random
_subvolume
_name
()
4311 group
= self
._generate
_random
_group
_name
()
4312 snapshot
= self
._generate
_random
_snapshot
_name
()
4315 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4317 # create subvolume in group
4318 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4320 # snapshot subvolume in group
4321 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
4324 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
4327 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4329 # verify trash dir is clean
4330 self
._wait
_for
_trash
_empty
()
4333 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4335 def test_subvolume_snapshot_ls(self
):
4336 # tests the 'fs subvolume snapshot ls' command
4341 subvolume
= self
._generate
_random
_subvolume
_name
()
4342 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4344 # create subvolume snapshots
4345 snapshots
= self
._generate
_random
_snapshot
_name
(3)
4346 for snapshot
in snapshots
:
4347 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4349 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
4350 if len(subvolsnapshotls
) == 0:
4351 self
.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots")
4353 snapshotnames
= [snapshot
['name'] for snapshot
in subvolsnapshotls
]
4354 if collections
.Counter(snapshotnames
) != collections
.Counter(snapshots
):
4355 self
.fail("Error creating or listing subvolume snapshots")
4358 for snapshot
in snapshots
:
4359 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4362 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4364 # verify trash dir is clean
4365 self
._wait
_for
_trash
_empty
()
4367 def test_subvolume_inherited_snapshot_ls(self
):
4368 # tests the scenario where 'fs subvolume snapshot ls' command
4369 # should not list inherited snapshots created as part of snapshot
4370 # at ancestral level
4373 subvolume
= self
._generate
_random
_subvolume
_name
()
4374 group
= self
._generate
_random
_group
_name
()
4378 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4380 # create subvolume in group
4381 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4383 # create subvolume snapshots
4384 snapshots
= self
._generate
_random
_snapshot
_name
(snap_count
)
4385 for snapshot
in snapshots
:
4386 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
4388 # Create snapshot at ancestral level
4389 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", "ancestral_snap_1")
4390 ancestral_snappath2
= os
.path
.join(".", "volumes", group
, ".snap", "ancestral_snap_2")
4391 self
.mount_a
.run_shell(['mkdir', '-p', ancestral_snappath1
, ancestral_snappath2
], sudo
=True)
4393 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
, group
))
4394 self
.assertEqual(len(subvolsnapshotls
), snap_count
)
4396 # remove ancestral snapshots
4397 self
.mount_a
.run_shell(['rmdir', ancestral_snappath1
, ancestral_snappath2
], sudo
=True)
4400 for snapshot
in snapshots
:
4401 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
4404 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4406 # verify trash dir is clean
4407 self
._wait
_for
_trash
_empty
()
4410 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4412 def test_subvolume_inherited_snapshot_info(self
):
4414 tests the scenario where 'fs subvolume snapshot info' command
4415 should fail for inherited snapshots created as part of snapshot
4419 subvolume
= self
._generate
_random
_subvolume
_name
()
4420 group
= self
._generate
_random
_group
_name
()
4423 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4425 # create subvolume in group
4426 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4428 # Create snapshot at ancestral level
4429 ancestral_snap_name
= "ancestral_snap_1"
4430 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", ancestral_snap_name
)
4431 self
.mount_a
.run_shell(['mkdir', '-p', ancestral_snappath1
], sudo
=True)
4433 # Validate existence of inherited snapshot
4434 group_path
= os
.path
.join(".", "volumes", group
)
4435 inode_number_group_dir
= int(self
.mount_a
.run_shell(['stat', '-c' '%i', group_path
]).stdout
.getvalue().strip())
4436 inherited_snap
= "_{0}_{1}".format(ancestral_snap_name
, inode_number_group_dir
)
4437 inherited_snappath
= os
.path
.join(".", "volumes", group
, subvolume
,".snap", inherited_snap
)
4438 self
.mount_a
.run_shell(['ls', inherited_snappath
])
4440 # snapshot info on inherited snapshot
4442 self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, inherited_snap
, group
)
4443 except CommandFailedError
as ce
:
4444 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on snapshot info of inherited snapshot")
4446 self
.fail("expected snapshot info of inherited snapshot to fail")
4448 # remove ancestral snapshots
4449 self
.mount_a
.run_shell(['rmdir', ancestral_snappath1
], sudo
=True)
4452 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--group_name", group
)
4454 # verify trash dir is clean
4455 self
._wait
_for
_trash
_empty
()
4458 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4460 def test_subvolume_inherited_snapshot_rm(self
):
4462 tests the scenario where 'fs subvolume snapshot rm' command
4463 should fail for inherited snapshots created as part of snapshot
4467 subvolume
= self
._generate
_random
_subvolume
_name
()
4468 group
= self
._generate
_random
_group
_name
()
4471 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4473 # create subvolume in group
4474 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4476 # Create snapshot at ancestral level
4477 ancestral_snap_name
= "ancestral_snap_1"
4478 ancestral_snappath1
= os
.path
.join(".", "volumes", group
, ".snap", ancestral_snap_name
)
4479 self
.mount_a
.run_shell(['mkdir', '-p', ancestral_snappath1
], sudo
=True)
4481 # Validate existence of inherited snap
4482 group_path
= os
.path
.join(".", "volumes", group
)
4483 inode_number_group_dir
= int(self
.mount_a
.run_shell(['stat', '-c' '%i', group_path
]).stdout
.getvalue().strip())
4484 inherited_snap
= "_{0}_{1}".format(ancestral_snap_name
, inode_number_group_dir
)
4485 inherited_snappath
= os
.path
.join(".", "volumes", group
, subvolume
,".snap", inherited_snap
)
4486 self
.mount_a
.run_shell(['ls', inherited_snappath
])
4488 # inherited snapshot should not be deletable
4490 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, inherited_snap
, "--group_name", group
)
4491 except CommandFailedError
as ce
:
4492 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, msg
="invalid error code when removing inherited snapshot")
4494 self
.fail("expected removing inheirted snapshot to fail")
4496 # remove ancestral snapshots
4497 self
.mount_a
.run_shell(['rmdir', ancestral_snappath1
], sudo
=True)
4500 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4502 # verify trash dir is clean
4503 self
._wait
_for
_trash
_empty
()
4506 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4508 def test_subvolume_subvolumegroup_snapshot_name_conflict(self
):
4510 tests the scenario where creation of subvolume snapshot name
4511 with same name as it's subvolumegroup snapshot name. This should
4515 subvolume
= self
._generate
_random
_subvolume
_name
()
4516 group
= self
._generate
_random
_group
_name
()
4517 group_snapshot
= self
._generate
_random
_snapshot
_name
()
4520 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4522 # create subvolume in group
4523 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--group_name", group
)
4525 # Create subvolumegroup snapshot
4526 group_snapshot_path
= os
.path
.join(".", "volumes", group
, ".snap", group_snapshot
)
4527 self
.mount_a
.run_shell(['mkdir', '-p', group_snapshot_path
], sudo
=True)
4529 # Validate existence of subvolumegroup snapshot
4530 self
.mount_a
.run_shell(['ls', group_snapshot_path
])
4532 # Creation of subvolume snapshot with it's subvolumegroup snapshot name should fail
4534 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, group_snapshot
, "--group_name", group
)
4535 except CommandFailedError
as ce
:
4536 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, msg
="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
4538 self
.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
4540 # remove subvolumegroup snapshot
4541 self
.mount_a
.run_shell(['rmdir', group_snapshot_path
], sudo
=True)
4544 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
4546 # verify trash dir is clean
4547 self
._wait
_for
_trash
_empty
()
4550 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4552 def test_subvolume_retain_snapshot_invalid_recreate(self
):
4554 ensure retained subvolume recreate does not leave any incarnations in the subvolume and trash
4556 subvolume
= self
._generate
_random
_subvolume
_name
()
4557 snapshot
= self
._generate
_random
_snapshot
_name
()
4560 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4562 # snapshot subvolume
4563 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4565 # remove with snapshot retention
4566 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4568 # recreate subvolume with an invalid pool
4569 data_pool
= "invalid_pool"
4571 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--pool_layout", data_pool
)
4572 except CommandFailedError
as ce
:
4573 self
.assertEqual(ce
.exitstatus
, errno
.EINVAL
, "invalid error code on recreate of subvolume with invalid poolname")
4575 self
.fail("expected recreate of subvolume with invalid poolname to fail")
4578 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4579 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4580 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4584 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
4585 except CommandFailedError
as ce
:
4586 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
4588 self
.fail("expected getpath of subvolume with retained snapshots to fail")
4590 # remove snapshot (should remove volume)
4591 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4593 # verify trash dir is clean
4594 self
._wait
_for
_trash
_empty
()
4596 def test_subvolume_retain_snapshot_recreate_subvolume(self
):
4598 ensure a retained subvolume can be recreated and further snapshotted
4600 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4602 subvolume
= self
._generate
_random
_subvolume
_name
()
4603 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
4606 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4608 # snapshot subvolume
4609 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
4611 # remove with snapshot retention
4612 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4615 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4616 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4617 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4619 # recreate retained subvolume
4620 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4623 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4624 self
.assertEqual(subvol_info
["state"], "complete",
4625 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4627 # snapshot info (older snapshot)
4628 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot1
))
4630 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4631 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4633 # snap-create (new snapshot)
4634 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot2
)
4636 # remove with retain snapshots
4637 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4640 subvolsnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, subvolume
))
4641 self
.assertEqual(len(subvolsnapshotls
), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
4642 " created subvolume snapshots")
4643 snapshotnames
= [snapshot
['name'] for snapshot
in subvolsnapshotls
]
4644 for snap
in [snapshot1
, snapshot2
]:
4645 self
.assertIn(snap
, snapshotnames
, "Missing snapshot '{0}' in snapshot list".format(snap
))
4647 # remove snapshots (should remove volume)
4648 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
4649 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot2
)
4651 # verify list subvolumes returns an empty list
4652 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4653 self
.assertEqual(len(subvolumels
), 0)
4655 # verify trash dir is clean
4656 self
._wait
_for
_trash
_empty
()
4658 def test_subvolume_retain_snapshot_with_snapshots(self
):
4660 ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume
4661 also test allowed and dis-allowed operations on a retained subvolume
4663 snap_md
= ["created_at", "data_pool", "has_pending_clones"]
4665 subvolume
= self
._generate
_random
_subvolume
_name
()
4666 snapshot
= self
._generate
_random
_snapshot
_name
()
4669 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4671 # snapshot subvolume
4672 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4674 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4676 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4677 except CommandFailedError
as ce
:
4678 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of retained subvolume with snapshots")
4680 self
.fail("expected rm of subvolume with retained snapshots to fail")
4682 # remove with snapshot retention
4683 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4686 subvol_info
= json
.loads(self
._fs
_cmd
("subvolume", "info", self
.volname
, subvolume
))
4687 self
.assertEqual(subvol_info
["state"], "snapshot-retained",
4688 msg
="expected state to be 'snapshot-retained', found '{0}".format(subvol_info
["state"]))
4690 ## test allowed ops in retained state
4692 subvolumes
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4693 self
.assertEqual(len(subvolumes
), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes
)))
4694 self
.assertEqual(subvolumes
[0]['name'], subvolume
,
4695 "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume
, subvolumes
[0]['name']))
4698 snap_info
= json
.loads(self
._get
_subvolume
_snapshot
_info
(self
.volname
, subvolume
, snapshot
))
4700 self
.assertIn(md
, snap_info
, "'{0}' key not present in metadata of snapshot".format(md
))
4701 self
.assertEqual(snap_info
["has_pending_clones"], "no")
4703 # rm --force (allowed but should fail)
4705 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--force")
4706 except CommandFailedError
as ce
:
4707 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
4709 self
.fail("expected rm of subvolume with retained snapshots to fail")
4711 # rm (allowed but should fail)
4713 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4714 except CommandFailedError
as ce
:
4715 self
.assertEqual(ce
.exitstatus
, errno
.ENOTEMPTY
, "invalid error code on rm of subvolume with retained snapshots")
4717 self
.fail("expected rm of subvolume with retained snapshots to fail")
4719 ## test disallowed ops
4722 self
._fs
_cmd
("subvolume", "getpath", self
.volname
, subvolume
)
4723 except CommandFailedError
as ce
:
4724 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on getpath of subvolume with retained snapshots")
4726 self
.fail("expected getpath of subvolume with retained snapshots to fail")
4729 nsize
= self
.DEFAULT_FILE_SIZE
*1024*1024
4731 self
._fs
_cmd
("subvolume", "resize", self
.volname
, subvolume
, str(nsize
))
4732 except CommandFailedError
as ce
:
4733 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on resize of subvolume with retained snapshots")
4735 self
.fail("expected resize of subvolume with retained snapshots to fail")
4739 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, "fail")
4740 except CommandFailedError
as ce
:
4741 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on snapshot create of subvolume with retained snapshots")
4743 self
.fail("expected snapshot create of subvolume with retained snapshots to fail")
4745 # remove snapshot (should remove volume)
4746 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4748 # verify list subvolumes returns an empty list
4749 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4750 self
.assertEqual(len(subvolumels
), 0)
4752 # verify trash dir is clean
4753 self
._wait
_for
_trash
_empty
()
4755 def test_subvolume_retain_snapshot_without_snapshots(self
):
4757 ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume
4759 subvolume
= self
._generate
_random
_subvolume
_name
()
4762 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4764 # remove with snapshot retention (should remove volume, no snapshots to retain)
4765 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4767 # verify list subvolumes returns an empty list
4768 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
4769 self
.assertEqual(len(subvolumels
), 0)
4771 # verify trash dir is clean
4772 self
._wait
_for
_trash
_empty
()
4774 def test_subvolume_retain_snapshot_trash_busy_recreate(self
):
4776 ensure retained subvolume recreate fails if its trash is not yet purged
4778 subvolume
= self
._generate
_random
_subvolume
_name
()
4779 snapshot
= self
._generate
_random
_snapshot
_name
()
4782 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4784 # snapshot subvolume
4785 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4787 # remove with snapshot retention
4788 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
4790 # fake a trash entry
4791 self
._update
_fake
_trash
(subvolume
)
4793 # recreate subvolume
4795 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4796 except CommandFailedError
as ce
:
4797 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of subvolume with purge pending")
4799 self
.fail("expected recreate of subvolume with purge pending to fail")
4801 # clear fake trash entry
4802 self
._update
_fake
_trash
(subvolume
, create
=False)
4804 # recreate subvolume
4805 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4808 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4811 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4813 # verify trash dir is clean
4814 self
._wait
_for
_trash
_empty
()
4816 def test_subvolume_rm_with_snapshots(self
):
4817 subvolume
= self
._generate
_random
_subvolume
_name
()
4818 snapshot
= self
._generate
_random
_snapshot
_name
()
4821 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
4823 # snapshot subvolume
4824 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4826 # remove subvolume -- should fail with ENOTEMPTY since it has snapshots
4828 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4829 except CommandFailedError
as ce
:
4830 if ce
.exitstatus
!= errno
.ENOTEMPTY
:
4831 raise RuntimeError("invalid error code returned when deleting subvolume with snapshots")
4833 raise RuntimeError("expected subvolume deletion to fail")
4836 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4839 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4841 # verify trash dir is clean
4842 self
._wait
_for
_trash
_empty
()
4844 def test_subvolume_snapshot_protect_unprotect_sanity(self
):
4846 Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
4847 invoking the command does not cause errors, till they are removed from a subsequent release.
4849 subvolume
= self
._generate
_random
_subvolume
_name
()
4850 snapshot
= self
._generate
_random
_snapshot
_name
()
4851 clone
= self
._generate
_random
_clone
_name
()
4854 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
4857 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
4859 # snapshot subvolume
4860 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
4862 # now, protect snapshot
4863 self
._fs
_cmd
("subvolume", "snapshot", "protect", self
.volname
, subvolume
, snapshot
)
4866 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
4868 # check clone status
4869 self
._wait
_for
_clone
_to
_complete
(clone
)
4871 # now, unprotect snapshot
4872 self
._fs
_cmd
("subvolume", "snapshot", "unprotect", self
.volname
, subvolume
, snapshot
)
4875 self
._verify
_clone
(subvolume
, snapshot
, clone
)
4878 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
4881 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
4882 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
4884 # verify trash dir is clean
4885 self
._wait
_for
_trash
_empty
()
4887 def test_subvolume_snapshot_rm_force(self
):
4888 # test removing non existing subvolume snapshot with --force
4889 subvolume
= self
._generate
_random
_subvolume
_name
()
4890 snapshot
= self
._generate
_random
_snapshot
_name
()
4894 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, "--force")
4895 except CommandFailedError
:
4896 raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed")
4898 def test_subvolume_snapshot_metadata_set(self
):
4900 Set custom metadata for subvolume snapshot.
4902 subvolname
= self
._generate
_random
_subvolume
_name
()
4903 group
= self
._generate
_random
_group
_name
()
4904 snapshot
= self
._generate
_random
_snapshot
_name
()
4907 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4909 # create subvolume in group.
4910 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4912 # snapshot subvolume
4913 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4915 # set metadata for snapshot.
4919 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4920 except CommandFailedError
:
4921 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
4923 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4924 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4925 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4927 # verify trash dir is clean.
4928 self
._wait
_for
_trash
_empty
()
4930 def test_subvolume_snapshot_metadata_set_idempotence(self
):
4932 Set custom metadata for subvolume snapshot (Idempotency).
4934 subvolname
= self
._generate
_random
_subvolume
_name
()
4935 group
= self
._generate
_random
_group
_name
()
4936 snapshot
= self
._generate
_random
_snapshot
_name
()
4939 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4941 # create subvolume in group.
4942 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4944 # snapshot subvolume
4945 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4947 # set metadata for snapshot.
4951 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4952 except CommandFailedError
:
4953 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
4955 # set same metadata again for subvolume.
4957 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
4958 except CommandFailedError
:
4959 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed because it is idempotent operation")
4961 # get value for specified key.
4963 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
4964 except CommandFailedError
:
4965 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
4967 # remove '\n' from returned value.
4968 ret
= ret
.strip('\n')
4970 # match received value with expected value.
4971 self
.assertEqual(value
, ret
)
4973 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
4974 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
4975 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
4977 # verify trash dir is clean.
4978 self
._wait
_for
_trash
_empty
()
4980 def test_subvolume_snapshot_metadata_get(self
):
4982 Get custom metadata for a specified key in subvolume snapshot metadata.
4984 subvolname
= self
._generate
_random
_subvolume
_name
()
4985 group
= self
._generate
_random
_group
_name
()
4986 snapshot
= self
._generate
_random
_snapshot
_name
()
4989 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
4991 # create subvolume in group.
4992 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
4994 # snapshot subvolume
4995 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
4997 # set metadata for snapshot.
5000 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5002 # get value for specified key.
5004 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5005 except CommandFailedError
:
5006 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5008 # remove '\n' from returned value.
5009 ret
= ret
.strip('\n')
5011 # match received value with expected value.
5012 self
.assertEqual(value
, ret
)
5014 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5015 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5016 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5018 # verify trash dir is clean.
5019 self
._wait
_for
_trash
_empty
()
5021 def test_subvolume_snapshot_metadata_get_for_nonexisting_key(self
):
5023 Get custom metadata for subvolume snapshot if specified key not exist in metadata.
5025 subvolname
= self
._generate
_random
_subvolume
_name
()
5026 group
= self
._generate
_random
_group
_name
()
5027 snapshot
= self
._generate
_random
_snapshot
_name
()
5030 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5032 # create subvolume in group.
5033 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5035 # snapshot subvolume
5036 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5038 # set metadata for snapshot.
5041 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5043 # try to get value for nonexisting key
5044 # Expecting ENOENT exit status because key does not exist
5046 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
5047 except CommandFailedError
as e
:
5048 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5050 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
5052 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5053 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5054 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5056 # verify trash dir is clean.
5057 self
._wait
_for
_trash
_empty
()
5059 def test_subvolume_snapshot_metadata_get_for_nonexisting_section(self
):
5061 Get custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5063 subvolname
= self
._generate
_random
_subvolume
_name
()
5064 group
= self
._generate
_random
_group
_name
()
5065 snapshot
= self
._generate
_random
_snapshot
_name
()
5068 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5070 # create subvolume in group.
5071 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5073 # snapshot subvolume
5074 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5076 # try to get value for nonexisting key (as section does not exist)
5077 # Expecting ENOENT exit status because key does not exist
5079 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, "key", group
)
5080 except CommandFailedError
as e
:
5081 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5083 self
.fail("Expected ENOENT because section does not exist")
5085 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5086 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5087 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5089 # verify trash dir is clean.
5090 self
._wait
_for
_trash
_empty
()
5092 def test_subvolume_snapshot_metadata_update(self
):
5094 Update custom metadata for a specified key in subvolume snapshot metadata.
5096 subvolname
= self
._generate
_random
_subvolume
_name
()
5097 group
= self
._generate
_random
_group
_name
()
5098 snapshot
= self
._generate
_random
_snapshot
_name
()
5101 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5103 # create subvolume in group.
5104 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5106 # snapshot subvolume
5107 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5109 # set metadata for snapshot.
5112 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5114 # update metadata against key.
5115 new_value
= "new_value"
5116 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, new_value
, group
)
5118 # get metadata for specified key of snapshot.
5120 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5121 except CommandFailedError
:
5122 self
.fail("expected the 'fs subvolume snapshot metadata get' command to succeed")
5124 # remove '\n' from returned value.
5125 ret
= ret
.strip('\n')
5127 # match received value with expected value.
5128 self
.assertEqual(new_value
, ret
)
5130 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5131 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5132 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5134 # verify trash dir is clean.
5135 self
._wait
_for
_trash
_empty
()
5137 def test_subvolume_snapshot_metadata_list(self
):
5139 List custom metadata for subvolume snapshot.
5141 subvolname
= self
._generate
_random
_subvolume
_name
()
5142 group
= self
._generate
_random
_group
_name
()
5143 snapshot
= self
._generate
_random
_snapshot
_name
()
5146 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5148 # create subvolume in group.
5149 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5151 # snapshot subvolume
5152 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5154 # set metadata for subvolume.
5155 input_metadata_dict
= {f
'key_{i}' : f
'value_{i}' for i
in range(3)}
5157 for k
, v
in input_metadata_dict
.items():
5158 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, k
, v
, group
)
5162 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
5163 except CommandFailedError
:
5164 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5166 # compare output with expected output
5167 self
.assertDictEqual(input_metadata_dict
, ret_dict
)
5169 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5170 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5171 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5173 # verify trash dir is clean.
5174 self
._wait
_for
_trash
_empty
()
5176 def test_subvolume_snapshot_metadata_list_if_no_metadata_set(self
):
5178 List custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5180 subvolname
= self
._generate
_random
_subvolume
_name
()
5181 group
= self
._generate
_random
_group
_name
()
5182 snapshot
= self
._generate
_random
_snapshot
_name
()
5185 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5187 # create subvolume in group.
5188 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5190 # snapshot subvolume
5191 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5195 ret_dict
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "metadata", "ls", self
.volname
, subvolname
, snapshot
, group
))
5196 except CommandFailedError
:
5197 self
.fail("expected the 'fs subvolume snapshot metadata ls' command to succeed")
5199 # compare output with expected output
5201 self
.assertDictEqual(ret_dict
, empty_dict
)
5203 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5204 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5205 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5207 # verify trash dir is clean.
5208 self
._wait
_for
_trash
_empty
()
5210 def test_subvolume_snapshot_metadata_remove(self
):
5212 Remove custom metadata for a specified key in subvolume snapshot metadata.
5214 subvolname
= self
._generate
_random
_subvolume
_name
()
5215 group
= self
._generate
_random
_group
_name
()
5216 snapshot
= self
._generate
_random
_snapshot
_name
()
5219 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5221 # create subvolume in group.
5222 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5224 # snapshot subvolume
5225 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5227 # set metadata for snapshot.
5230 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5232 # remove metadata against specified key.
5234 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
5235 except CommandFailedError
:
5236 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5238 # confirm key is removed by again fetching metadata
5240 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, key
, snapshot
, group
)
5241 except CommandFailedError
as e
:
5242 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5244 self
.fail("Expected ENOENT because key does not exist")
5246 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5247 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5248 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5250 # verify trash dir is clean.
5251 self
._wait
_for
_trash
_empty
()
5253 def test_subvolume_snapshot_metadata_remove_for_nonexisting_key(self
):
5255 Remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5257 subvolname
= self
._generate
_random
_subvolume
_name
()
5258 group
= self
._generate
_random
_group
_name
()
5259 snapshot
= self
._generate
_random
_snapshot
_name
()
5262 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5264 # create subvolume in group.
5265 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5267 # snapshot subvolume
5268 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5270 # set metadata for snapshot.
5273 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5275 # try to remove value for nonexisting key
5276 # Expecting ENOENT exit status because key does not exist
5278 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key_nonexist", group
)
5279 except CommandFailedError
as e
:
5280 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5282 self
.fail("Expected ENOENT because 'key_nonexist' does not exist")
5284 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5285 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5286 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5288 # verify trash dir is clean.
5289 self
._wait
_for
_trash
_empty
()
5291 def test_subvolume_snapshot_metadata_remove_for_nonexisting_section(self
):
5293 Remove custom metadata for subvolume snapshot if metadata is not added for subvolume snapshot.
5295 subvolname
= self
._generate
_random
_subvolume
_name
()
5296 group
= self
._generate
_random
_group
_name
()
5297 snapshot
= self
._generate
_random
_snapshot
_name
()
5300 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5302 # create subvolume in group.
5303 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5305 # snapshot subvolume
5306 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5308 # try to remove value for nonexisting key (as section does not exist)
5309 # Expecting ENOENT exit status because key does not exist
5311 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, "key", group
)
5312 except CommandFailedError
as e
:
5313 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5315 self
.fail("Expected ENOENT because section does not exist")
5317 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5318 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5319 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5321 # verify trash dir is clean.
5322 self
._wait
_for
_trash
_empty
()
5324 def test_subvolume_snapshot_metadata_remove_force(self
):
5326 Forcefully remove custom metadata for a specified key in subvolume snapshot metadata.
5328 subvolname
= self
._generate
_random
_subvolume
_name
()
5329 group
= self
._generate
_random
_group
_name
()
5330 snapshot
= self
._generate
_random
_snapshot
_name
()
5333 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5335 # create subvolume in group.
5336 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5338 # snapshot subvolume
5339 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5341 # set metadata for snapshot.
5344 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5346 # remove metadata against specified key with --force option.
5348 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
5349 except CommandFailedError
:
5350 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5352 # confirm key is removed by again fetching metadata
5354 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5355 except CommandFailedError
as e
:
5356 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5358 self
.fail("Expected ENOENT because key does not exist")
5360 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5361 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5362 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5364 # verify trash dir is clean.
5365 self
._wait
_for
_trash
_empty
()
5367 def test_subvolume_snapshot_metadata_remove_force_for_nonexisting_key(self
):
5369 Forcefully remove custom metadata for subvolume snapshot if specified key not exist in metadata.
5371 subvolname
= self
._generate
_random
_subvolume
_name
()
5372 group
= self
._generate
_random
_group
_name
()
5373 snapshot
= self
._generate
_random
_snapshot
_name
()
5376 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5378 # create subvolume in group.
5379 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5381 # snapshot subvolume
5382 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5384 # set metadata for snapshot.
5387 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5389 # remove metadata against specified key.
5391 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
)
5392 except CommandFailedError
:
5393 self
.fail("expected the 'fs subvolume snapshot metadata rm' command to succeed")
5395 # confirm key is removed by again fetching metadata
5397 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5398 except CommandFailedError
as e
:
5399 self
.assertEqual(e
.exitstatus
, errno
.ENOENT
)
5401 self
.fail("Expected ENOENT because key does not exist")
5403 # again remove metadata against already removed key with --force option.
5405 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "rm", self
.volname
, subvolname
, snapshot
, key
, group
, "--force")
5406 except CommandFailedError
:
5407 self
.fail("expected the 'fs subvolume snapshot metadata rm' (with --force) command to succeed")
5409 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5410 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5411 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5413 # verify trash dir is clean.
5414 self
._wait
_for
_trash
_empty
()
5416 def test_subvolume_snapshot_metadata_after_snapshot_remove(self
):
5418 Verify metadata removal of subvolume snapshot after snapshot removal.
5420 subvolname
= self
._generate
_random
_subvolume
_name
()
5421 group
= self
._generate
_random
_group
_name
()
5422 snapshot
= self
._generate
_random
_snapshot
_name
()
5425 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5427 # create subvolume in group.
5428 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5430 # snapshot subvolume
5431 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5433 # set metadata for snapshot.
5436 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5438 # get value for specified key.
5439 ret
= self
._fs
_cmd
("subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
)
5441 # remove '\n' from returned value.
5442 ret
= ret
.strip('\n')
5444 # match received value with expected value.
5445 self
.assertEqual(value
, ret
)
5447 # remove subvolume snapshot.
5448 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5450 # try to get metadata after removing snapshot.
5451 # Expecting error ENOENT with error message of snapshot does not exist
5452 cmd_ret
= self
.mgr_cluster
.mon_manager
.run_cluster_cmd(
5453 args
=["fs", "subvolume", "snapshot", "metadata", "get", self
.volname
, subvolname
, snapshot
, key
, group
],
5454 check_status
=False, stdout
=StringIO(), stderr
=StringIO())
5455 self
.assertEqual(cmd_ret
.returncode
, errno
.ENOENT
, "Expecting ENOENT error")
5456 self
.assertIn(f
"snapshot '{snapshot}' does not exist", cmd_ret
.stderr
.getvalue(),
5457 f
"Expecting message: snapshot '{snapshot}' does not exist ")
5459 # confirm metadata is removed by searching section name in .meta file
5460 meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta")
5461 section_name
= "SNAP_METADATA_" + snapshot
5464 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5465 except CommandFailedError
as e
:
5466 self
.assertNotEqual(e
.exitstatus
, 0)
5468 self
.fail("Expected non-zero exist status because section should not exist")
5470 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5471 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5473 # verify trash dir is clean.
5474 self
._wait
_for
_trash
_empty
()
5476 def test_clean_stale_subvolume_snapshot_metadata(self
):
5478 Validate cleaning of stale subvolume snapshot metadata.
5480 subvolname
= self
._generate
_random
_subvolume
_name
()
5481 group
= self
._generate
_random
_group
_name
()
5482 snapshot
= self
._generate
_random
_snapshot
_name
()
5485 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5487 # create subvolume in group.
5488 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolname
, group
)
5490 # snapshot subvolume
5491 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolname
, snapshot
, group
)
5493 # set metadata for snapshot.
5497 self
._fs
_cmd
("subvolume", "snapshot", "metadata", "set", self
.volname
, subvolname
, snapshot
, key
, value
, group
)
5498 except CommandFailedError
:
5499 self
.fail("expected the 'fs subvolume snapshot metadata set' command to succeed")
5501 # save the subvolume config file.
5502 meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta")
5503 tmp_meta_path
= os
.path
.join(".", "volumes", group
, subvolname
, ".meta.stale_snap_section")
5504 self
.mount_a
.run_shell(['sudo', 'cp', '-p', meta_path
, tmp_meta_path
], omit_sudo
=False)
5506 # Delete snapshot, this would remove user snap metadata
5507 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolname
, snapshot
, group
)
5509 # Copy back saved subvolume config file. This would have stale snapshot metadata
5510 self
.mount_a
.run_shell(['sudo', 'cp', '-p', tmp_meta_path
, meta_path
], omit_sudo
=False)
5512 # Verify that it has stale snapshot metadata
5513 section_name
= "SNAP_METADATA_" + snapshot
5515 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5516 except CommandFailedError
:
5517 self
.fail("Expected grep cmd to succeed because stale snapshot metadata exist")
5519 # Do any subvolume operation to clean the stale snapshot metadata
5520 _
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, subvolname
, group
))
5522 # Verify that the stale snapshot metadata is cleaned
5524 self
.mount_a
.run_shell(f
"sudo grep {section_name} {meta_path}", omit_sudo
=False)
5525 except CommandFailedError
as e
:
5526 self
.assertNotEqual(e
.exitstatus
, 0)
5528 self
.fail("Expected non-zero exist status because stale snapshot metadata should not exist")
5530 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolname
, group
)
5531 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5533 # verify trash dir is clean.
5534 self
._wait
_for
_trash
_empty
()
5535 # Clean tmp config file
5536 self
.mount_a
.run_shell(['sudo', 'rm', '-f', tmp_meta_path
], omit_sudo
=False)
5539 class TestSubvolumeSnapshotClones(TestVolumesHelper
):
5540 """ Tests for FS subvolume snapshot clone operations."""
5541 def test_clone_subvolume_info(self
):
5542 # tests the 'fs subvolume info' command for a clone
5543 subvol_md
= ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
5544 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
5547 subvolume
= self
._generate
_random
_subvolume
_name
()
5548 snapshot
= self
._generate
_random
_snapshot
_name
()
5549 clone
= self
._generate
_random
_clone
_name
()
5552 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5555 self
._do
_subvolume
_io
(subvolume
, number_of_files
=1)
5557 # snapshot subvolume
5558 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5561 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5563 # check clone status
5564 self
._wait
_for
_clone
_to
_complete
(clone
)
5567 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5569 subvol_info
= json
.loads(self
._get
_subvolume
_info
(self
.volname
, clone
))
5570 if len(subvol_info
) == 0:
5571 raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
5572 for md
in subvol_md
:
5573 if md
not in subvol_info
.keys():
5574 raise RuntimeError("%s not present in the metadata of subvolume" % md
)
5575 if subvol_info
["type"] != "clone":
5576 raise RuntimeError("type should be set to clone")
5579 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5580 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5582 # verify trash dir is clean
5583 self
._wait
_for
_trash
_empty
()
5585 def test_subvolume_snapshot_info_without_snapshot_clone(self
):
5587 Verify subvolume snapshot info output without clonnnig snapshot.
5588 If no clone is performed then path /volumes/_index/clone/{track_id}
5591 subvolume
= self
._generate
_random
_subvolume
_name
()
5592 snapshot
= self
._generate
_random
_snapshot
_name
()
5595 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5597 # snapshot subvolume
5598 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5600 # list snapshot info
5601 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5603 # verify snapshot info
5604 self
.assertEqual(result
['has_pending_clones'], "no")
5605 self
.assertFalse('orphan_clones_count' in result
)
5606 self
.assertFalse('pending_clones' in result
)
5608 # remove snapshot, subvolume, clone
5609 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5610 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5612 # verify trash dir is clean
5613 self
._wait
_for
_trash
_empty
()
5615 def test_subvolume_snapshot_info_if_no_clone_pending(self
):
5617 Verify subvolume snapshot info output if no clone is in pending state.
5619 subvolume
= self
._generate
_random
_subvolume
_name
()
5620 snapshot
= self
._generate
_random
_snapshot
_name
()
5621 clone_list
= [f
'clone_{i}' for i
in range(3)]
5624 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5626 # snapshot subvolume
5627 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5630 for clone
in clone_list
:
5631 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5633 # check clones status
5634 for clone
in clone_list
:
5635 self
._wait
_for
_clone
_to
_complete
(clone
)
5637 # list snapshot info
5638 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5640 # verify snapshot info
5641 self
.assertEqual(result
['has_pending_clones'], "no")
5642 self
.assertFalse('orphan_clones_count' in result
)
5643 self
.assertFalse('pending_clones' in result
)
5645 # remove snapshot, subvolume, clone
5646 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5647 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5648 for clone
in clone_list
:
5649 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5651 # verify trash dir is clean
5652 self
._wait
_for
_trash
_empty
()
5654 def test_subvolume_snapshot_info_if_clone_pending_for_no_group(self
):
5656 Verify subvolume snapshot info output if clones are in pending state.
5657 Clones are not specified for particular target_group. Hence target_group
5658 should not be in the output as we don't show _nogroup (default group)
5660 subvolume
= self
._generate
_random
_subvolume
_name
()
5661 snapshot
= self
._generate
_random
_snapshot
_name
()
5662 clone_list
= [f
'clone_{i}' for i
in range(3)]
5665 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5667 # snapshot subvolume
5668 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5670 # insert delay at the beginning of snapshot clone
5671 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5674 for clone
in clone_list
:
5675 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5677 # list snapshot info
5678 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5680 # verify snapshot info
5681 expected_clone_list
= []
5682 for clone
in clone_list
:
5683 expected_clone_list
.append({"name": clone
})
5684 self
.assertEqual(result
['has_pending_clones'], "yes")
5685 self
.assertFalse('orphan_clones_count' in result
)
5686 self
.assertListEqual(result
['pending_clones'], expected_clone_list
)
5687 self
.assertEqual(len(result
['pending_clones']), 3)
5689 # check clones status
5690 for clone
in clone_list
:
5691 self
._wait
_for
_clone
_to
_complete
(clone
)
5693 # remove snapshot, subvolume, clone
5694 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5695 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5696 for clone
in clone_list
:
5697 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5699 # verify trash dir is clean
5700 self
._wait
_for
_trash
_empty
()
5702 def test_subvolume_snapshot_info_if_clone_pending_for_target_group(self
):
5704 Verify subvolume snapshot info output if clones are in pending state.
5705 Clones are not specified for target_group.
5707 subvolume
= self
._generate
_random
_subvolume
_name
()
5708 snapshot
= self
._generate
_random
_snapshot
_name
()
5709 clone
= self
._generate
_random
_clone
_name
()
5710 group
= self
._generate
_random
_group
_name
()
5711 target_group
= self
._generate
_random
_group
_name
()
5714 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
5715 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, target_group
)
5718 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, group
, "--mode=777")
5720 # snapshot subvolume
5721 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
5723 # insert delay at the beginning of snapshot clone
5724 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5727 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
,
5728 "--group_name", group
, "--target_group_name", target_group
)
5730 # list snapshot info
5731 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
, "--group_name", group
))
5733 # verify snapshot info
5734 expected_clone_list
= [{"name": clone
, "target_group": target_group
}]
5735 self
.assertEqual(result
['has_pending_clones'], "yes")
5736 self
.assertFalse('orphan_clones_count' in result
)
5737 self
.assertListEqual(result
['pending_clones'], expected_clone_list
)
5738 self
.assertEqual(len(result
['pending_clones']), 1)
5740 # check clone status
5741 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=target_group
)
5744 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
5747 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
5748 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, target_group
)
5751 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
5752 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, target_group
)
5754 # verify trash dir is clean
5755 self
._wait
_for
_trash
_empty
()
5757 def test_subvolume_snapshot_info_if_orphan_clone(self
):
5759 Verify subvolume snapshot info output if orphan clones exists.
5760 Orphan clones should not list under pending clones.
5761 orphan_clones_count should display correct count of orphan clones'
5763 subvolume
= self
._generate
_random
_subvolume
_name
()
5764 snapshot
= self
._generate
_random
_snapshot
_name
()
5765 clone_list
= [f
'clone_{i}' for i
in range(3)]
5768 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5770 # snapshot subvolume
5771 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5773 # insert delay at the beginning of snapshot clone
5774 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
5777 for clone
in clone_list
:
5778 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5780 # remove track file for third clone to make it orphan
5781 meta_path
= os
.path
.join(".", "volumes", "_nogroup", subvolume
, ".meta")
5782 pending_clones_result
= self
.mount_a
.run_shell(f
"sudo grep \"clone snaps\" -A3 {meta_path}", omit_sudo
=False, stdout
=StringIO(), stderr
=StringIO())
5783 third_clone_track_id
= pending_clones_result
.stdout
.getvalue().splitlines()[3].split(" = ")[0]
5784 third_clone_track_path
= os
.path
.join(".", "volumes", "_index", "clone", third_clone_track_id
)
5785 self
.mount_a
.run_shell(f
"sudo rm -f {third_clone_track_path}", omit_sudo
=False)
5787 # list snapshot info
5788 result
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5790 # verify snapshot info
5791 expected_clone_list
= []
5792 for i
in range(len(clone_list
)-1):
5793 expected_clone_list
.append({"name": clone_list
[i
]})
5794 self
.assertEqual(result
['has_pending_clones'], "yes")
5795 self
.assertEqual(result
['orphan_clones_count'], 1)
5796 self
.assertListEqual(result
['pending_clones'], expected_clone_list
)
5797 self
.assertEqual(len(result
['pending_clones']), 2)
5799 # check clones status
5800 for i
in range(len(clone_list
)-1):
5801 self
._wait
_for
_clone
_to
_complete
(clone_list
[i
])
5803 # list snapshot info after cloning completion
5804 res
= json
.loads(self
._fs
_cmd
("subvolume", "snapshot", "info", self
.volname
, subvolume
, snapshot
))
5806 # verify snapshot info (has_pending_clones should be no)
5807 self
.assertEqual(res
['has_pending_clones'], "no")
5809 def test_non_clone_status(self
):
5810 subvolume
= self
._generate
_random
_subvolume
_name
()
5813 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
5816 self
._fs
_cmd
("clone", "status", self
.volname
, subvolume
)
5817 except CommandFailedError
as ce
:
5818 if ce
.exitstatus
!= errno
.ENOTSUP
:
5819 raise RuntimeError("invalid error code when fetching status of a non cloned subvolume")
5821 raise RuntimeError("expected fetching of clone status of a subvolume to fail")
5824 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5826 # verify trash dir is clean
5827 self
._wait
_for
_trash
_empty
()
5829 def test_subvolume_clone_inherit_snapshot_namespace_and_size(self
):
5830 subvolume
= self
._generate
_random
_subvolume
_name
()
5831 snapshot
= self
._generate
_random
_snapshot
_name
()
5832 clone
= self
._generate
_random
_clone
_name
()
5833 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*12
5835 # create subvolume, in an isolated namespace with a specified size
5836 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--namespace-isolated", "--size", str(osize
), "--mode=777")
5839 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
5841 # snapshot subvolume
5842 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5844 # create a pool different from current subvolume pool
5845 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
5846 default_pool
= self
.mount_a
.getfattr(subvol_path
, "ceph.dir.layout.pool")
5847 new_pool
= "new_pool"
5848 self
.assertNotEqual(default_pool
, new_pool
)
5849 self
.fs
.add_data_pool(new_pool
)
5851 # update source subvolume pool
5852 self
._do
_subvolume
_pool
_and
_namespace
_update
(subvolume
, pool
=new_pool
, pool_namespace
="")
5854 # schedule a clone, with NO --pool specification
5855 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5857 # check clone status
5858 self
._wait
_for
_clone
_to
_complete
(clone
)
5861 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5864 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5867 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5868 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5870 # verify trash dir is clean
5871 self
._wait
_for
_trash
_empty
()
5873 def test_subvolume_clone_inherit_quota_attrs(self
):
5874 subvolume
= self
._generate
_random
_subvolume
_name
()
5875 snapshot
= self
._generate
_random
_snapshot
_name
()
5876 clone
= self
._generate
_random
_clone
_name
()
5877 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*12
5879 # create subvolume with a specified size
5880 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777", "--size", str(osize
))
5883 self
._do
_subvolume
_io
(subvolume
, number_of_files
=8)
5885 # get subvolume path
5886 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
5888 # set quota on number of files
5889 self
.mount_a
.setfattr(subvolpath
, 'ceph.quota.max_files', "20", sudo
=True)
5891 # snapshot subvolume
5892 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5895 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5897 # check clone status
5898 self
._wait
_for
_clone
_to
_complete
(clone
)
5901 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5903 # get subvolume path
5904 clonepath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
5906 # verify quota max_files is inherited from source snapshot
5907 subvol_quota
= self
.mount_a
.getfattr(subvolpath
, "ceph.quota.max_files")
5908 clone_quota
= self
.mount_a
.getfattr(clonepath
, "ceph.quota.max_files")
5909 self
.assertEqual(subvol_quota
, clone_quota
)
5912 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5915 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5916 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5918 # verify trash dir is clean
5919 self
._wait
_for
_trash
_empty
()
5921 def test_subvolume_clone_in_progress_getpath(self
):
5922 subvolume
= self
._generate
_random
_subvolume
_name
()
5923 snapshot
= self
._generate
_random
_snapshot
_name
()
5924 clone
= self
._generate
_random
_clone
_name
()
5927 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5930 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
5932 # snapshot subvolume
5933 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5935 # Insert delay at the beginning of snapshot clone
5936 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5939 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5941 # clone should not be accessible right now
5943 self
._get
_subvolume
_path
(self
.volname
, clone
)
5944 except CommandFailedError
as ce
:
5945 if ce
.exitstatus
!= errno
.EAGAIN
:
5946 raise RuntimeError("invalid error code when fetching path of an pending clone")
5948 raise RuntimeError("expected fetching path of an pending clone to fail")
5950 # check clone status
5951 self
._wait
_for
_clone
_to
_complete
(clone
)
5953 # clone should be accessible now
5954 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
5955 self
.assertNotEqual(subvolpath
, None)
5958 self
._verify
_clone
(subvolume
, snapshot
, clone
)
5961 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5964 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
5965 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
5967 # verify trash dir is clean
5968 self
._wait
_for
_trash
_empty
()
5970 def test_subvolume_clone_in_progress_snapshot_rm(self
):
5971 subvolume
= self
._generate
_random
_subvolume
_name
()
5972 snapshot
= self
._generate
_random
_snapshot
_name
()
5973 clone
= self
._generate
_random
_clone
_name
()
5976 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
5979 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
5981 # snapshot subvolume
5982 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
5984 # Insert delay at the beginning of snapshot clone
5985 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
5988 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
5990 # snapshot should not be deletable now
5992 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
5993 except CommandFailedError
as ce
:
5994 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, msg
="invalid error code when removing source snapshot of a clone")
5996 self
.fail("expected removing source snapshot of a clone to fail")
5998 # check clone status
5999 self
._wait
_for
_clone
_to
_complete
(clone
)
6001 # clone should be accessible now
6002 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
6003 self
.assertNotEqual(subvolpath
, None)
6006 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6009 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6012 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6013 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6015 # verify trash dir is clean
6016 self
._wait
_for
_trash
_empty
()
6018 def test_subvolume_clone_in_progress_source(self
):
6019 subvolume
= self
._generate
_random
_subvolume
_name
()
6020 snapshot
= self
._generate
_random
_snapshot
_name
()
6021 clone
= self
._generate
_random
_clone
_name
()
6024 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6027 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6029 # snapshot subvolume
6030 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6032 # Insert delay at the beginning of snapshot clone
6033 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6036 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6038 # verify clone source
6039 result
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, clone
))
6040 source
= result
['status']['source']
6041 self
.assertEqual(source
['volume'], self
.volname
)
6042 self
.assertEqual(source
['subvolume'], subvolume
)
6043 self
.assertEqual(source
.get('group', None), None)
6044 self
.assertEqual(source
['snapshot'], snapshot
)
6046 # check clone status
6047 self
._wait
_for
_clone
_to
_complete
(clone
)
6049 # clone should be accessible now
6050 subvolpath
= self
._get
_subvolume
_path
(self
.volname
, clone
)
6051 self
.assertNotEqual(subvolpath
, None)
6054 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6057 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6060 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6061 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6063 # verify trash dir is clean
6064 self
._wait
_for
_trash
_empty
()
6066 def test_subvolume_clone_retain_snapshot_with_snapshots(self
):
6068 retain snapshots of a cloned subvolume and check disallowed operations
6070 subvolume
= self
._generate
_random
_subvolume
_name
()
6071 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
6072 clone
= self
._generate
_random
_clone
_name
()
6075 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6077 # store path for clone verification
6078 subvol1_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6081 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6083 # snapshot subvolume
6084 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
6086 # remove with snapshot retention
6087 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6089 # clone retained subvolume snapshot
6090 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot1
, clone
)
6092 # check clone status
6093 self
._wait
_for
_clone
_to
_complete
(clone
)
6096 self
._verify
_clone
(subvolume
, snapshot1
, clone
, subvol_path
=subvol1_path
)
6098 # create a snapshot on the clone
6099 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone
, snapshot2
)
6102 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--retain-snapshots")
6105 clonesnapshotls
= json
.loads(self
._fs
_cmd
('subvolume', 'snapshot', 'ls', self
.volname
, clone
))
6106 self
.assertEqual(len(clonesnapshotls
), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
6107 " created subvolume snapshots")
6108 snapshotnames
= [snapshot
['name'] for snapshot
in clonesnapshotls
]
6109 for snap
in [snapshot2
]:
6110 self
.assertIn(snap
, snapshotnames
, "Missing snapshot '{0}' in snapshot list".format(snap
))
6112 ## check disallowed operations on retained clone
6115 self
._fs
_cmd
("clone", "status", self
.volname
, clone
)
6116 except CommandFailedError
as ce
:
6117 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on clone status of clone with retained snapshots")
6119 self
.fail("expected clone status of clone with retained snapshots to fail")
6123 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6124 except CommandFailedError
as ce
:
6125 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
, "invalid error code on clone cancel of clone with retained snapshots")
6127 self
.fail("expected clone cancel of clone with retained snapshots to fail")
6129 # remove snapshots (removes subvolumes as all are in retained state)
6130 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
6131 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone
, snapshot2
)
6133 # verify list subvolumes returns an empty list
6134 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6135 self
.assertEqual(len(subvolumels
), 0)
6137 # verify trash dir is clean
6138 self
._wait
_for
_trash
_empty
()
6140 def test_subvolume_retain_snapshot_clone(self
):
6142 clone a snapshot from a snapshot retained subvolume
6144 subvolume
= self
._generate
_random
_subvolume
_name
()
6145 snapshot
= self
._generate
_random
_snapshot
_name
()
6146 clone
= self
._generate
_random
_clone
_name
()
6149 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6151 # store path for clone verification
6152 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6155 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6157 # snapshot subvolume
6158 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6160 # remove with snapshot retention
6161 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6163 # clone retained subvolume snapshot
6164 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6166 # check clone status
6167 self
._wait
_for
_clone
_to
_complete
(clone
)
6170 self
._verify
_clone
(subvolume
, snapshot
, clone
, subvol_path
=subvol_path
)
6172 # remove snapshots (removes retained volume)
6173 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6176 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6178 # verify list subvolumes returns an empty list
6179 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6180 self
.assertEqual(len(subvolumels
), 0)
6182 # verify trash dir is clean
6183 self
._wait
_for
_trash
_empty
()
6185 def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self
):
6187 clone a subvolume from recreated subvolume's latest snapshot
6189 subvolume
= self
._generate
_random
_subvolume
_name
()
6190 snapshot1
, snapshot2
= self
._generate
_random
_snapshot
_name
(2)
6191 clone
= self
._generate
_random
_clone
_name
(1)
6194 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6197 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6199 # snapshot subvolume
6200 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot1
)
6202 # remove with snapshot retention
6203 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6205 # recreate subvolume
6206 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6208 # get and store path for clone verification
6209 subvol2_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6212 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6214 # snapshot newer subvolume
6215 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot2
)
6217 # remove with snapshot retention
6218 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6220 # clone retained subvolume's newer snapshot
6221 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot2
, clone
)
6223 # check clone status
6224 self
._wait
_for
_clone
_to
_complete
(clone
)
6227 self
._verify
_clone
(subvolume
, snapshot2
, clone
, subvol_path
=subvol2_path
)
6230 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot1
)
6231 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot2
)
6234 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6236 # verify list subvolumes returns an empty list
6237 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6238 self
.assertEqual(len(subvolumels
), 0)
6240 # verify trash dir is clean
6241 self
._wait
_for
_trash
_empty
()
6243 def test_subvolume_retain_snapshot_recreate(self
):
6245 recreate a subvolume from one of its retained snapshots
6247 subvolume
= self
._generate
_random
_subvolume
_name
()
6248 snapshot
= self
._generate
_random
_snapshot
_name
()
6251 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6253 # store path for clone verification
6254 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, subvolume
)
6257 self
._do
_subvolume
_io
(subvolume
, number_of_files
=16)
6259 # snapshot subvolume
6260 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6262 # remove with snapshot retention
6263 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, "--retain-snapshots")
6265 # recreate retained subvolume using its own snapshot to clone
6266 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, subvolume
)
6268 # check clone status
6269 self
._wait
_for
_clone
_to
_complete
(subvolume
)
6272 self
._verify
_clone
(subvolume
, snapshot
, subvolume
, subvol_path
=subvol_path
)
6275 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6278 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6280 # verify list subvolumes returns an empty list
6281 subvolumels
= json
.loads(self
._fs
_cmd
('subvolume', 'ls', self
.volname
))
6282 self
.assertEqual(len(subvolumels
), 0)
6284 # verify trash dir is clean
6285 self
._wait
_for
_trash
_empty
()
6287 def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self
):
6289 ensure retained clone recreate fails if its trash is not yet purged
6291 subvolume
= self
._generate
_random
_subvolume
_name
()
6292 snapshot
= self
._generate
_random
_snapshot
_name
()
6293 clone
= self
._generate
_random
_clone
_name
()
6296 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
6298 # snapshot subvolume
6299 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6301 # clone subvolume snapshot
6302 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6304 # check clone status
6305 self
._wait
_for
_clone
_to
_complete
(clone
)
6308 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone
, snapshot
)
6310 # remove clone with snapshot retention
6311 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--retain-snapshots")
6313 # fake a trash entry
6314 self
._update
_fake
_trash
(clone
)
6316 # clone subvolume snapshot (recreate)
6318 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6319 except CommandFailedError
as ce
:
6320 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, "invalid error code on recreate of clone with purge pending")
6322 self
.fail("expected recreate of clone with purge pending to fail")
6324 # clear fake trash entry
6325 self
._update
_fake
_trash
(clone
, create
=False)
6327 # recreate subvolume
6328 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6330 # check clone status
6331 self
._wait
_for
_clone
_to
_complete
(clone
)
6334 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6335 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone
, snapshot
)
6338 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6339 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6341 # verify trash dir is clean
6342 self
._wait
_for
_trash
_empty
()
6344 def test_subvolume_snapshot_attr_clone(self
):
6345 subvolume
= self
._generate
_random
_subvolume
_name
()
6346 snapshot
= self
._generate
_random
_snapshot
_name
()
6347 clone
= self
._generate
_random
_clone
_name
()
6350 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6353 self
._do
_subvolume
_io
_mixed
(subvolume
)
6355 # snapshot subvolume
6356 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6359 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6361 # check clone status
6362 self
._wait
_for
_clone
_to
_complete
(clone
)
6365 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6368 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6371 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6372 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6374 # verify trash dir is clean
6375 self
._wait
_for
_trash
_empty
()
6377 def test_clone_failure_status_pending_in_progress_complete(self
):
6379 ensure failure status is not shown when clone is not in failed/cancelled state
6381 subvolume
= self
._generate
_random
_subvolume
_name
()
6382 snapshot
= self
._generate
_random
_snapshot
_name
()
6383 clone1
= self
._generate
_random
_clone
_name
()
6386 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6389 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6391 # snapshot subvolume
6392 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6394 # Insert delay at the beginning of snapshot clone
6395 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6398 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6400 # pending clone shouldn't show failure status
6401 clone1_result
= self
._get
_clone
_status
(clone1
)
6403 clone1_result
["status"]["failure"]["errno"]
6404 except KeyError as e
:
6405 self
.assertEqual(str(e
), "'failure'")
6407 self
.fail("clone status shouldn't show failure for pending clone")
6409 # check clone1 to be in-progress
6410 self
._wait
_for
_clone
_to
_be
_in
_progress
(clone1
)
6412 # in-progress clone1 shouldn't show failure status
6413 clone1_result
= self
._get
_clone
_status
(clone1
)
6415 clone1_result
["status"]["failure"]["errno"]
6416 except KeyError as e
:
6417 self
.assertEqual(str(e
), "'failure'")
6419 self
.fail("clone status shouldn't show failure for in-progress clone")
6421 # wait for clone1 to complete
6422 self
._wait
_for
_clone
_to
_complete
(clone1
)
6424 # complete clone1 shouldn't show failure status
6425 clone1_result
= self
._get
_clone
_status
(clone1
)
6427 clone1_result
["status"]["failure"]["errno"]
6428 except KeyError as e
:
6429 self
.assertEqual(str(e
), "'failure'")
6431 self
.fail("clone status shouldn't show failure for complete clone")
6434 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6437 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6438 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6440 # verify trash dir is clean
6441 self
._wait
_for
_trash
_empty
()
6443 def test_clone_failure_status_failed(self
):
6445 ensure failure status is shown when clone is in failed state and validate the reason
6447 subvolume
= self
._generate
_random
_subvolume
_name
()
6448 snapshot
= self
._generate
_random
_snapshot
_name
()
6449 clone1
= self
._generate
_random
_clone
_name
()
6452 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6455 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6457 # snapshot subvolume
6458 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6460 # Insert delay at the beginning of snapshot clone
6461 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6464 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6466 # remove snapshot from backend to force the clone failure.
6467 snappath
= os
.path
.join(".", "volumes", "_nogroup", subvolume
, ".snap", snapshot
)
6468 self
.mount_a
.run_shell(['rmdir', snappath
], sudo
=True)
6470 # wait for clone1 to fail.
6471 self
._wait
_for
_clone
_to
_fail
(clone1
)
6473 # check clone1 status
6474 clone1_result
= self
._get
_clone
_status
(clone1
)
6475 self
.assertEqual(clone1_result
["status"]["state"], "failed")
6476 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "2")
6477 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "snapshot '{0}' does not exist".format(snapshot
))
6479 # clone removal should succeed after failure, remove clone1
6480 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6483 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6485 # verify trash dir is clean
6486 self
._wait
_for
_trash
_empty
()
6488 def test_clone_failure_status_pending_cancelled(self
):
6490 ensure failure status is shown when clone is cancelled during pending state and validate the reason
6492 subvolume
= self
._generate
_random
_subvolume
_name
()
6493 snapshot
= self
._generate
_random
_snapshot
_name
()
6494 clone1
= self
._generate
_random
_clone
_name
()
6497 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6500 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6502 # snapshot subvolume
6503 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6505 # Insert delay at the beginning of snapshot clone
6506 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6509 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6511 # cancel pending clone1
6512 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone1
)
6514 # check clone1 status
6515 clone1_result
= self
._get
_clone
_status
(clone1
)
6516 self
.assertEqual(clone1_result
["status"]["state"], "canceled")
6517 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "4")
6518 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "user interrupted clone operation")
6520 # clone removal should succeed with force after cancelled, remove clone1
6521 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6524 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6527 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6529 # verify trash dir is clean
6530 self
._wait
_for
_trash
_empty
()
6532 def test_clone_failure_status_in_progress_cancelled(self
):
6534 ensure failure status is shown when clone is cancelled during in-progress state and validate the reason
6536 subvolume
= self
._generate
_random
_subvolume
_name
()
6537 snapshot
= self
._generate
_random
_snapshot
_name
()
6538 clone1
= self
._generate
_random
_clone
_name
()
6541 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6544 self
._do
_subvolume
_io
(subvolume
, number_of_files
=200)
6546 # snapshot subvolume
6547 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6549 # Insert delay at the beginning of snapshot clone
6550 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 5)
6553 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6555 # wait for clone1 to be in-progress
6556 self
._wait
_for
_clone
_to
_be
_in
_progress
(clone1
)
6558 # cancel in-progess clone1
6559 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone1
)
6561 # check clone1 status
6562 clone1_result
= self
._get
_clone
_status
(clone1
)
6563 self
.assertEqual(clone1_result
["status"]["state"], "canceled")
6564 self
.assertEqual(clone1_result
["status"]["failure"]["errno"], "4")
6565 self
.assertEqual(clone1_result
["status"]["failure"]["error_msg"], "user interrupted clone operation")
6567 # clone removal should succeed with force after cancelled, remove clone1
6568 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
, "--force")
6571 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6574 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6576 # verify trash dir is clean
6577 self
._wait
_for
_trash
_empty
()
6579 def test_subvolume_snapshot_clone(self
):
6580 subvolume
= self
._generate
_random
_subvolume
_name
()
6581 snapshot
= self
._generate
_random
_snapshot
_name
()
6582 clone
= self
._generate
_random
_clone
_name
()
6585 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6588 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6590 # snapshot subvolume
6591 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6594 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6596 # check clone status
6597 self
._wait
_for
_clone
_to
_complete
(clone
)
6600 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6603 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6606 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6607 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6609 # verify trash dir is clean
6610 self
._wait
_for
_trash
_empty
()
6612 def test_subvolume_snapshot_clone_quota_exceeded(self
):
6613 subvolume
= self
._generate
_random
_subvolume
_name
()
6614 snapshot
= self
._generate
_random
_snapshot
_name
()
6615 clone
= self
._generate
_random
_clone
_name
()
6617 # create subvolume with 20MB quota
6618 osize
= self
.DEFAULT_FILE_SIZE
*1024*1024*20
6619 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
,"--mode=777", "--size", str(osize
))
6621 # do IO, write 50 files of 1MB each to exceed quota. This mostly succeeds as quota enforcement takes time.
6622 self
._do
_subvolume
_io
(subvolume
, number_of_files
=50)
6624 # snapshot subvolume
6625 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6628 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6630 # check clone status
6631 self
._wait
_for
_clone
_to
_complete
(clone
)
6634 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6637 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6640 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6641 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6643 # verify trash dir is clean
6644 self
._wait
_for
_trash
_empty
()
6646 def test_subvolume_snapshot_in_complete_clone_rm(self
):
6648 Validates the removal of clone when it is not in 'complete|cancelled|failed' state.
6649 The forceful removl of subvolume clone succeeds only if it's in any of the
6650 'complete|cancelled|failed' states. It fails with EAGAIN in any other states.
6653 subvolume
= self
._generate
_random
_subvolume
_name
()
6654 snapshot
= self
._generate
_random
_snapshot
_name
()
6655 clone
= self
._generate
_random
_clone
_name
()
6658 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6661 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
6663 # snapshot subvolume
6664 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6666 # Insert delay at the beginning of snapshot clone
6667 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6670 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6672 # Use --force since clone is not complete. Returns EAGAIN as clone is not either complete or cancelled.
6674 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6675 except CommandFailedError
as ce
:
6676 if ce
.exitstatus
!= errno
.EAGAIN
:
6677 raise RuntimeError("invalid error code when trying to remove failed clone")
6679 raise RuntimeError("expected error when removing a failed clone")
6681 # cancel on-going clone
6682 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6684 # verify canceled state
6685 self
._check
_clone
_canceled
(clone
)
6687 # clone removal should succeed after cancel
6688 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6691 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6694 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6696 # verify trash dir is clean
6697 self
._wait
_for
_trash
_empty
()
6699 def test_subvolume_snapshot_clone_retain_suid_guid(self
):
6700 subvolume
= self
._generate
_random
_subvolume
_name
()
6701 snapshot
= self
._generate
_random
_snapshot
_name
()
6702 clone
= self
._generate
_random
_clone
_name
()
6705 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6707 # Create a file with suid, guid bits set along with executable bit.
6708 args
= ["subvolume", "getpath", self
.volname
, subvolume
]
6710 subvolpath
= self
._fs
_cmd
(*args
)
6711 self
.assertNotEqual(subvolpath
, None)
6712 subvolpath
= subvolpath
[1:].rstrip() # remove "/" prefix and any trailing newline
6714 file_path
= subvolpath
6715 file_path
= os
.path
.join(subvolpath
, "test_suid_file")
6716 self
.mount_a
.run_shell(["touch", file_path
])
6717 self
.mount_a
.run_shell(["chmod", "u+sx,g+sx", file_path
])
6719 # snapshot subvolume
6720 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6723 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6725 # check clone status
6726 self
._wait
_for
_clone
_to
_complete
(clone
)
6729 self
._verify
_clone
(subvolume
, snapshot
, clone
)
6732 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6735 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6736 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6738 # verify trash dir is clean
6739 self
._wait
_for
_trash
_empty
()
6741 def test_subvolume_snapshot_clone_and_reclone(self
):
6742 subvolume
= self
._generate
_random
_subvolume
_name
()
6743 snapshot
= self
._generate
_random
_snapshot
_name
()
6744 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
6747 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6750 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
6752 # snapshot subvolume
6753 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6756 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
)
6758 # check clone status
6759 self
._wait
_for
_clone
_to
_complete
(clone1
)
6762 self
._verify
_clone
(subvolume
, snapshot
, clone1
)
6765 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6767 # now the clone is just like a normal subvolume -- snapshot the clone and fork
6768 # another clone. before that do some IO so it's can be differentiated.
6769 self
._do
_subvolume
_io
(clone1
, create_dir
="data", number_of_files
=32)
6771 # snapshot clone -- use same snap name
6772 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, clone1
, snapshot
)
6775 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, clone1
, snapshot
, clone2
)
6777 # check clone status
6778 self
._wait
_for
_clone
_to
_complete
(clone2
)
6781 self
._verify
_clone
(clone1
, snapshot
, clone2
)
6784 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, clone1
, snapshot
)
6787 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6788 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6789 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
6791 # verify trash dir is clean
6792 self
._wait
_for
_trash
_empty
()
6794 def test_subvolume_snapshot_clone_cancel_in_progress(self
):
6795 subvolume
= self
._generate
_random
_subvolume
_name
()
6796 snapshot
= self
._generate
_random
_snapshot
_name
()
6797 clone
= self
._generate
_random
_clone
_name
()
6800 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6803 self
._do
_subvolume
_io
(subvolume
, number_of_files
=128)
6805 # snapshot subvolume
6806 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6808 # Insert delay at the beginning of snapshot clone
6809 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
6812 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6814 # cancel on-going clone
6815 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6817 # verify canceled state
6818 self
._check
_clone
_canceled
(clone
)
6821 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6824 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6825 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6827 # verify trash dir is clean
6828 self
._wait
_for
_trash
_empty
()
6830 def test_subvolume_snapshot_clone_cancel_pending(self
):
6832 this test is a bit more involved compared to canceling an in-progress clone.
6833 we'd need to ensure that a to-be canceled clone has still not been picked up
6834 by cloner threads. exploit the fact that clones are picked up in an FCFS
6835 fashion and there are four (4) cloner threads by default. When the number of
6836 cloner threads increase, this test _may_ start tripping -- so, the number of
6837 clone operations would need to be jacked up.
6839 # default number of clone threads
6841 # good enough for 4 threads
6843 # yeh, 1gig -- we need the clone to run for sometime
6846 subvolume
= self
._generate
_random
_subvolume
_name
()
6847 snapshot
= self
._generate
_random
_snapshot
_name
()
6848 clones
= self
._generate
_random
_clone
_name
(NR_CLONES
)
6851 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6854 self
._do
_subvolume
_io
(subvolume
, number_of_files
=4, file_size
=FILE_SIZE_MB
)
6856 # snapshot subvolume
6857 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6860 for clone
in clones
:
6861 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
6863 to_wait
= clones
[0:NR_THREADS
]
6864 to_cancel
= clones
[NR_THREADS
:]
6866 # cancel pending clones and verify
6867 for clone
in to_cancel
:
6868 status
= json
.loads(self
._fs
_cmd
("clone", "status", self
.volname
, clone
))
6869 self
.assertEqual(status
["status"]["state"], "pending")
6870 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6871 self
._check
_clone
_canceled
(clone
)
6873 # let's cancel on-going clones. handle the case where some of the clones
6875 for clone
in list(to_wait
):
6877 self
._fs
_cmd
("clone", "cancel", self
.volname
, clone
)
6878 to_cancel
.append(clone
)
6879 to_wait
.remove(clone
)
6880 except CommandFailedError
as ce
:
6881 if ce
.exitstatus
!= errno
.EINVAL
:
6882 raise RuntimeError("invalid error code when cancelling on-going clone")
6885 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6888 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6889 for clone
in to_wait
:
6890 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
6891 for clone
in to_cancel
:
6892 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, "--force")
6894 # verify trash dir is clean
6895 self
._wait
_for
_trash
_empty
()
6897 def test_subvolume_snapshot_clone_different_groups(self
):
6898 subvolume
= self
._generate
_random
_subvolume
_name
()
6899 snapshot
= self
._generate
_random
_snapshot
_name
()
6900 clone
= self
._generate
_random
_clone
_name
()
6901 s_group
, c_group
= self
._generate
_random
_group
_name
(2)
6904 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, s_group
)
6905 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, c_group
)
6908 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, s_group
, "--mode=777")
6911 self
._do
_subvolume
_io
(subvolume
, subvolume_group
=s_group
, number_of_files
=32)
6913 # snapshot subvolume
6914 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, s_group
)
6917 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
,
6918 '--group_name', s_group
, '--target_group_name', c_group
)
6920 # check clone status
6921 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=c_group
)
6924 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_group
=s_group
, clone_group
=c_group
)
6927 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, s_group
)
6930 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, s_group
)
6931 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, c_group
)
6934 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, s_group
)
6935 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, c_group
)
6937 # verify trash dir is clean
6938 self
._wait
_for
_trash
_empty
()
6940 def test_subvolume_snapshot_clone_fail_with_remove(self
):
6941 subvolume
= self
._generate
_random
_subvolume
_name
()
6942 snapshot
= self
._generate
_random
_snapshot
_name
()
6943 clone1
, clone2
= self
._generate
_random
_clone
_name
(2)
6945 pool_capacity
= 32 * 1024 * 1024
6946 # number of files required to fill up 99% of the pool
6947 nr_files
= int((pool_capacity
* 0.99) / (TestVolumes
.DEFAULT_FILE_SIZE
* 1024 * 1024))
6950 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
6953 self
._do
_subvolume
_io
(subvolume
, number_of_files
=nr_files
)
6955 # snapshot subvolume
6956 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
6959 new_pool
= "new_pool"
6960 self
.fs
.add_data_pool(new_pool
)
6962 self
.fs
.mon_manager
.raw_cluster_cmd("osd", "pool", "set-quota", new_pool
,
6963 "max_bytes", "{0}".format(pool_capacity
// 4))
6966 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone1
, "--pool_layout", new_pool
)
6968 # check clone status -- this should dramatically overshoot the pool quota
6969 self
._wait
_for
_clone
_to
_complete
(clone1
)
6972 self
._verify
_clone
(subvolume
, snapshot
, clone1
, clone_pool
=new_pool
)
6974 # wait a bit so that subsequent I/O will give pool full error
6978 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone2
, "--pool_layout", new_pool
)
6980 # check clone status
6981 self
._wait
_for
_clone
_to
_fail
(clone2
)
6984 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
6987 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
6988 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone1
)
6990 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
)
6991 except CommandFailedError
as ce
:
6992 if ce
.exitstatus
!= errno
.EAGAIN
:
6993 raise RuntimeError("invalid error code when trying to remove failed clone")
6995 raise RuntimeError("expected error when removing a failed clone")
6997 # ... and with force, failed clone can be removed
6998 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone2
, "--force")
7000 # verify trash dir is clean
7001 self
._wait
_for
_trash
_empty
()
7003 def test_subvolume_snapshot_clone_on_existing_subvolumes(self
):
7004 subvolume1
, subvolume2
= self
._generate
_random
_subvolume
_name
(2)
7005 snapshot
= self
._generate
_random
_snapshot
_name
()
7006 clone
= self
._generate
_random
_clone
_name
()
7009 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume1
, "--mode=777")
7010 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume2
, "--mode=777")
7013 self
._do
_subvolume
_io
(subvolume1
, number_of_files
=32)
7015 # snapshot subvolume
7016 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume1
, snapshot
)
7018 # schedule a clone with target as subvolume2
7020 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, subvolume2
)
7021 except CommandFailedError
as ce
:
7022 if ce
.exitstatus
!= errno
.EEXIST
:
7023 raise RuntimeError("invalid error code when cloning to existing subvolume")
7025 raise RuntimeError("expected cloning to fail if the target is an existing subvolume")
7027 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, clone
)
7029 # schedule a clone with target as clone
7031 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume1
, snapshot
, clone
)
7032 except CommandFailedError
as ce
:
7033 if ce
.exitstatus
!= errno
.EEXIST
:
7034 raise RuntimeError("invalid error code when cloning to existing clone")
7036 raise RuntimeError("expected cloning to fail if the target is an existing clone")
7038 # check clone status
7039 self
._wait
_for
_clone
_to
_complete
(clone
)
7042 self
._verify
_clone
(subvolume1
, snapshot
, clone
)
7045 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume1
, snapshot
)
7048 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume1
)
7049 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume2
)
7050 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7052 # verify trash dir is clean
7053 self
._wait
_for
_trash
_empty
()
7055 def test_subvolume_snapshot_clone_pool_layout(self
):
7056 subvolume
= self
._generate
_random
_subvolume
_name
()
7057 snapshot
= self
._generate
_random
_snapshot
_name
()
7058 clone
= self
._generate
_random
_clone
_name
()
7061 new_pool
= "new_pool"
7062 newid
= self
.fs
.add_data_pool(new_pool
)
7065 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7068 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7070 # snapshot subvolume
7071 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7074 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, "--pool_layout", new_pool
)
7076 # check clone status
7077 self
._wait
_for
_clone
_to
_complete
(clone
)
7080 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_pool
=new_pool
)
7083 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7085 subvol_path
= self
._get
_subvolume
_path
(self
.volname
, clone
)
7086 desired_pool
= self
.mount_a
.getfattr(subvol_path
, "ceph.dir.layout.pool")
7088 self
.assertEqual(desired_pool
, new_pool
)
7089 except AssertionError:
7090 self
.assertEqual(int(desired_pool
), newid
) # old kernel returns id
7093 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7094 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7096 # verify trash dir is clean
7097 self
._wait
_for
_trash
_empty
()
7099 def test_subvolume_snapshot_clone_under_group(self
):
7100 subvolume
= self
._generate
_random
_subvolume
_name
()
7101 snapshot
= self
._generate
_random
_snapshot
_name
()
7102 clone
= self
._generate
_random
_clone
_name
()
7103 group
= self
._generate
_random
_group
_name
()
7106 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode=777")
7109 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7111 # snapshot subvolume
7112 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7115 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
7118 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--target_group_name', group
)
7120 # check clone status
7121 self
._wait
_for
_clone
_to
_complete
(clone
, clone_group
=group
)
7124 self
._verify
_clone
(subvolume
, snapshot
, clone
, clone_group
=group
)
7127 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7130 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7131 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
, group
)
7134 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
7136 # verify trash dir is clean
7137 self
._wait
_for
_trash
_empty
()
7139 def test_subvolume_snapshot_clone_with_attrs(self
):
7140 subvolume
= self
._generate
_random
_subvolume
_name
()
7141 snapshot
= self
._generate
_random
_snapshot
_name
()
7142 clone
= self
._generate
_random
_clone
_name
()
7152 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, "--mode", mode
, "--uid", uid
, "--gid", gid
)
7155 self
._do
_subvolume
_io
(subvolume
, number_of_files
=32)
7157 # snapshot subvolume
7158 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7160 # change subvolume attrs (to ensure clone picks up snapshot attrs)
7161 self
._do
_subvolume
_attr
_update
(subvolume
, new_uid
, new_gid
, new_mode
)
7164 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
7166 # check clone status
7167 self
._wait
_for
_clone
_to
_complete
(clone
)
7170 self
._verify
_clone
(subvolume
, snapshot
, clone
)
7173 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7176 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7177 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7179 # verify trash dir is clean
7180 self
._wait
_for
_trash
_empty
()
7182 def test_subvolume_snapshot_clone_with_upgrade(self
):
7184 yet another poor man's upgrade test -- rather than going through a full
7185 upgrade cycle, emulate old types subvolumes by going through the wormhole
7186 and verify clone operation.
7187 further ensure that a legacy volume is not updated to v2, but clone is.
7189 subvolume
= self
._generate
_random
_subvolume
_name
()
7190 snapshot
= self
._generate
_random
_snapshot
_name
()
7191 clone
= self
._generate
_random
_clone
_name
()
7193 # emulate a old-fashioned subvolume
7194 createpath
= os
.path
.join(".", "volumes", "_nogroup", subvolume
)
7195 self
.mount_a
.run_shell_payload(f
"mkdir -p -m 777 {createpath}", sudo
=True)
7197 # add required xattrs to subvolume
7198 default_pool
= self
.mount_a
.getfattr(".", "ceph.dir.layout.pool")
7199 self
.mount_a
.setfattr(createpath
, 'ceph.dir.layout.pool', default_pool
, sudo
=True)
7202 self
._do
_subvolume
_io
(subvolume
, number_of_files
=64)
7204 # snapshot subvolume
7205 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
)
7207 # ensure metadata file is in legacy location, with required version v1
7208 self
._assert
_meta
_location
_and
_version
(self
.volname
, subvolume
, version
=1, legacy
=True)
7210 # Insert delay at the beginning of snapshot clone
7211 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7214 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
)
7216 # snapshot should not be deletable now
7218 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7219 except CommandFailedError
as ce
:
7220 self
.assertEqual(ce
.exitstatus
, errno
.EAGAIN
, msg
="invalid error code when removing source snapshot of a clone")
7222 self
.fail("expected removing source snapshot of a clone to fail")
7224 # check clone status
7225 self
._wait
_for
_clone
_to
_complete
(clone
)
7228 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_version
=1)
7231 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
)
7233 # ensure metadata file is in v2 location, with required version v2
7234 self
._assert
_meta
_location
_and
_version
(self
.volname
, clone
)
7237 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
)
7238 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7240 # verify trash dir is clean
7241 self
._wait
_for
_trash
_empty
()
7243 def test_subvolume_snapshot_reconf_max_concurrent_clones(self
):
7245 Validate 'max_concurrent_clones' config option
7248 # get the default number of cloner threads
7249 default_max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7250 self
.assertEqual(default_max_concurrent_clones
, 4)
7252 # Increase number of cloner threads
7253 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6)
7254 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7255 self
.assertEqual(max_concurrent_clones
, 6)
7257 # Decrease number of cloner threads
7258 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7259 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7260 self
.assertEqual(max_concurrent_clones
, 2)
7262 def test_subvolume_snapshot_config_snapshot_clone_delay(self
):
7264 Validate 'snapshot_clone_delay' config option
7267 # get the default delay before starting the clone
7268 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7269 self
.assertEqual(default_timeout
, 0)
7271 # Insert delay of 2 seconds at the beginning of the snapshot clone
7272 self
.config_set('mgr', 'mgr/volumes/snapshot_clone_delay', 2)
7273 default_timeout
= int(self
.config_get('mgr', 'mgr/volumes/snapshot_clone_delay'))
7274 self
.assertEqual(default_timeout
, 2)
7276 # Decrease number of cloner threads
7277 self
.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2)
7278 max_concurrent_clones
= int(self
.config_get('mgr', 'mgr/volumes/max_concurrent_clones'))
7279 self
.assertEqual(max_concurrent_clones
, 2)
7281 def test_subvolume_under_group_snapshot_clone(self
):
7282 subvolume
= self
._generate
_random
_subvolume
_name
()
7283 group
= self
._generate
_random
_group
_name
()
7284 snapshot
= self
._generate
_random
_snapshot
_name
()
7285 clone
= self
._generate
_random
_clone
_name
()
7288 self
._fs
_cmd
("subvolumegroup", "create", self
.volname
, group
)
7291 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
, group
, "--mode=777")
7294 self
._do
_subvolume
_io
(subvolume
, subvolume_group
=group
, number_of_files
=32)
7296 # snapshot subvolume
7297 self
._fs
_cmd
("subvolume", "snapshot", "create", self
.volname
, subvolume
, snapshot
, group
)
7300 self
._fs
_cmd
("subvolume", "snapshot", "clone", self
.volname
, subvolume
, snapshot
, clone
, '--group_name', group
)
7302 # check clone status
7303 self
._wait
_for
_clone
_to
_complete
(clone
)
7306 self
._verify
_clone
(subvolume
, snapshot
, clone
, source_group
=group
)
7309 self
._fs
_cmd
("subvolume", "snapshot", "rm", self
.volname
, subvolume
, snapshot
, group
)
7312 self
._fs
_cmd
("subvolume", "rm", self
.volname
, subvolume
, group
)
7313 self
._fs
_cmd
("subvolume", "rm", self
.volname
, clone
)
7316 self
._fs
_cmd
("subvolumegroup", "rm", self
.volname
, group
)
7318 # verify trash dir is clean
7319 self
._wait
_for
_trash
_empty
()
7322 class TestMisc(TestVolumesHelper
):
7323 """Miscellaneous tests related to FS volume, subvolume group, and subvolume operations."""
7324 def test_connection_expiration(self
):
7325 # unmount any cephfs mounts
7326 for i
in range(0, self
.CLIENTS_REQUIRED
):
7327 self
.mounts
[i
].umount_wait()
7328 sessions
= self
._session
_list
()
7329 self
.assertLessEqual(len(sessions
), 1) # maybe mgr is already mounted
7331 # Get the mgr to definitely mount cephfs
7332 subvolume
= self
._generate
_random
_subvolume
_name
()
7333 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
7334 sessions
= self
._session
_list
()
7335 self
.assertEqual(len(sessions
), 1)
7337 # Now wait for the mgr to expire the connection:
7338 self
.wait_until_evicted(sessions
[0]['id'], timeout
=90)
7340 def test_mgr_eviction(self
):
7341 # unmount any cephfs mounts
7342 for i
in range(0, self
.CLIENTS_REQUIRED
):
7343 self
.mounts
[i
].umount_wait()
7344 sessions
= self
._session
_list
()
7345 self
.assertLessEqual(len(sessions
), 1) # maybe mgr is already mounted
7347 # Get the mgr to definitely mount cephfs
7348 subvolume
= self
._generate
_random
_subvolume
_name
()
7349 self
._fs
_cmd
("subvolume", "create", self
.volname
, subvolume
)
7350 sessions
= self
._session
_list
()
7351 self
.assertEqual(len(sessions
), 1)
7353 # Now fail the mgr, check the session was evicted
7354 mgr
= self
.mgr_cluster
.get_active_id()
7355 self
.mgr_cluster
.mgr_fail(mgr
)
7356 self
.wait_until_evicted(sessions
[0]['id'])
7358 def test_names_can_only_be_goodchars(self
):
7360 Test the creating vols, subvols subvolgroups fails when their names uses
7361 characters beyond [a-zA-Z0-9 -_.].
7363 volname
, badname
= 'testvol', 'abcd@#'
7365 with self
.assertRaises(CommandFailedError
):
7366 self
._fs
_cmd
('volume', 'create', badname
)
7367 self
._fs
_cmd
('volume', 'create', volname
)
7369 with self
.assertRaises(CommandFailedError
):
7370 self
._fs
_cmd
('subvolumegroup', 'create', volname
, badname
)
7372 with self
.assertRaises(CommandFailedError
):
7373 self
._fs
_cmd
('subvolume', 'create', volname
, badname
)
7374 self
._fs
_cmd
('volume', 'rm', volname
, '--yes-i-really-mean-it')
7376 def test_subvolume_ops_on_nonexistent_vol(self
):
7377 # tests the fs subvolume operations on non existing volume
7379 volname
= "non_existent_subvolume"
7381 # try subvolume operations
7382 for op
in ("create", "rm", "getpath", "info", "resize", "pin", "ls"):
7385 self
._fs
_cmd
("subvolume", "resize", volname
, "subvolname_1", "inf")
7387 self
._fs
_cmd
("subvolume", "pin", volname
, "subvolname_1", "export", "1")
7389 self
._fs
_cmd
("subvolume", "ls", volname
)
7391 self
._fs
_cmd
("subvolume", op
, volname
, "subvolume_1")
7392 except CommandFailedError
as ce
:
7393 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
7395 self
.fail("expected the 'fs subvolume {0}' command to fail".format(op
))
7397 # try subvolume snapshot operations and clone create
7398 for op
in ("create", "rm", "info", "protect", "unprotect", "ls", "clone"):
7401 self
._fs
_cmd
("subvolume", "snapshot", op
, volname
, "subvolume_1")
7403 self
._fs
_cmd
("subvolume", "snapshot", op
, volname
, "subvolume_1", "snapshot_1", "clone_1")
7405 self
._fs
_cmd
("subvolume", "snapshot", op
, volname
, "subvolume_1", "snapshot_1")
7406 except CommandFailedError
as ce
:
7407 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
7409 self
.fail("expected the 'fs subvolume snapshot {0}' command to fail".format(op
))
7413 self
._fs
_cmd
("clone", "status", volname
, "clone_1")
7414 except CommandFailedError
as ce
:
7415 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
7417 self
.fail("expected the 'fs clone status' command to fail")
7419 # try subvolumegroup operations
7420 for op
in ("create", "rm", "getpath", "pin", "ls"):
7423 self
._fs
_cmd
("subvolumegroup", "pin", volname
, "group_1", "export", "0")
7425 self
._fs
_cmd
("subvolumegroup", op
, volname
)
7427 self
._fs
_cmd
("subvolumegroup", op
, volname
, "group_1")
7428 except CommandFailedError
as ce
:
7429 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
7431 self
.fail("expected the 'fs subvolumegroup {0}' command to fail".format(op
))
7433 # try subvolumegroup snapshot operations
7434 for op
in ("create", "rm", "ls"):
7437 self
._fs
_cmd
("subvolumegroup", "snapshot", op
, volname
, "group_1")
7439 self
._fs
_cmd
("subvolumegroup", "snapshot", op
, volname
, "group_1", "snapshot_1")
7440 except CommandFailedError
as ce
:
7441 self
.assertEqual(ce
.exitstatus
, errno
.ENOENT
)
7443 self
.fail("expected the 'fs subvolumegroup snapshot {0}' command to fail".format(op
))
def test_subvolume_upgrade_legacy_to_v1(self):
    """
    poor man's upgrade test -- rather than going through a full upgrade cycle,
    emulate subvolumes by going through the wormhole and verify if they are
    accessible.
    further ensure that a legacy volume is not updated to v2.
    """
    subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume -- one in the default group and
    # the other in a custom group
    createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
    self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)

    createpath2 = os.path.join(".", "volumes", group, subvolume2)
    self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)

    # this would auto-upgrade on access without anyone noticing
    subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
    self.assertNotEqual(subvolpath1, None)
    subvolpath1 = subvolpath1.rstrip()  # remove "/" prefix and any trailing newline

    subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
    self.assertNotEqual(subvolpath2, None)
    subvolpath2 = subvolpath2.rstrip()  # remove "/" prefix and any trailing newline

    # and... the subvolume path returned should be what we created behind the scene
    self.assertEqual(createpath1[1:], subvolpath1)
    self.assertEqual(createpath2[1:], subvolpath2)

    # ensure metadata file is in legacy location, with required version v1
    self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
    self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)

    # remove subvolumes
    self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_no_upgrade_v1_sanity(self):
    """
    poor man's upgrade test -- theme continues...

    This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
    a series of operations on the v1 subvolume to ensure they work as expected.
    """
    subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                 "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
                 "type", "uid", "features", "state"]
    snap_md = ["created_at", "data_pool", "has_pending_clones"]

    subvolume = self._generate_random_subvolume_name()
    snapshot = self._generate_random_snapshot_name()
    clone1, clone2 = self._generate_random_clone_name(2)
    # NOTE(review): these three assignments were lost in the source mangling;
    # values reconstructed from their later use in "subvolume create" -- confirm.
    mode = "777"
    uid = "1000"
    gid = "1000"

    # emulate a v1 subvolume -- in the default group
    subvolume_path = self._create_v1_subvolume(subvolume)

    # getpath
    subvolpath = self._get_subvolume_path(self.volname, subvolume)
    self.assertEqual(subvolpath, subvolume_path)

    # ls
    subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
    self.assertEqual(subvolumes[0]['name'], subvolume,
                     "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))

    # info
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))

    self.assertEqual(subvol_info["state"], "complete",
                     msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
    self.assertEqual(len(subvol_info["features"]), 2,
                     msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
    for feature in ['snapshot-clone', 'snapshot-autoprotect']:
        self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))

    # resize and check quota is reflected in subvolume info
    nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
    self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
    subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
    for md in subvol_md:
        self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
    self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))

    # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
    self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)

    # do some IO
    self._do_subvolume_io(subvolume, number_of_files=8)

    # snap-create
    self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

    # clone
    self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)

    # check clone status
    self._wait_for_clone_to_complete(clone1)

    # ensure clone is v2
    self._assert_meta_location_and_version(self.volname, clone1, version=2)

    # verify clone
    self._verify_clone(subvolume, snapshot, clone1, source_version=1)

    # clone (older snapshot)
    self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)

    # check clone status
    self._wait_for_clone_to_complete(clone2)

    # ensure clone is v2
    self._assert_meta_location_and_version(self.volname, clone2, version=2)

    # verify clone
    # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747
    #self._verify_clone(subvolume, 'fake', clone2, source_version=1)

    # snap-info
    snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
    for md in snap_md:
        self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
    self.assertEqual(snap_info["has_pending_clones"], "no")

    # snap-ls
    subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
    self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots)))
    # use a distinct comprehension variable to avoid shadowing 'snapshot'
    snapshotnames = [s['name'] for s in subvol_snapshots]
    for name in [snapshot, 'fake']:
        self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))

    # snap-rm
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")

    # ensure volume is still at version 1
    self._assert_meta_location_and_version(self.volname, subvolume, version=1)

    # rm
    self._fs_cmd("subvolume", "rm", self.volname, subvolume)
    self._fs_cmd("subvolume", "rm", self.volname, clone1)
    self._fs_cmd("subvolume", "rm", self.volname, clone2)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_no_upgrade_v1_to_v2(self):
    """
    poor man's upgrade test -- theme continues...
    ensure v1 to v2 upgrades are not done automatically due to various states of v1
    """
    subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
    group = self._generate_random_group_name()

    # emulate a v1 subvolume -- in the default group
    subvol1_path = self._create_v1_subvolume(subvolume1)

    # emulate a v1 subvolume -- in a custom group
    subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)

    # emulate a v1 subvolume -- in a clone pending state
    self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')

    # this would attempt auto-upgrade on access, but fail to do so as snapshots exist
    subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
    self.assertEqual(subvolpath1, subvol1_path)

    subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
    self.assertEqual(subvolpath2, subvol2_path)

    # this would attempt auto-upgrade on access, but fail to do so as volume is not complete
    # use clone status, as only certain operations are allowed in pending state
    status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
    self.assertEqual(status["status"]["state"], "pending")

    # remove snapshot
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
    self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)

    # ensure metadata file is in v1 location, with version retained as v1
    self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
    self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)

    # remove subvolume; a clone in pending state must refuse plain rm with EAGAIN
    self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
    try:
        self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
    except CommandFailedError as ce:
        self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
    else:
        self.fail("expected rm of subvolume undergoing clone to fail")

    # ensure metadata file is in v1 location, with version retained as v1
    self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")

    # verify list subvolumes returns an empty list
    subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
    self.assertEqual(len(subvolumels), 0)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_subvolume_upgrade_v1_to_v2(self):
    """
    poor man's upgrade test -- theme continues...
    ensure v1 to v2 upgrades work
    """
    subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
    group = self._generate_random_group_name()

    # emulate a v1 subvolume -- in the default group (no snapshot, so upgrade is allowed)
    subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)

    # emulate a v1 subvolume -- in a custom group
    subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)

    # this would attempt auto-upgrade on access
    subvolpath1 = self._get_subvolume_path(self.volname, subvolume1)
    self.assertEqual(subvolpath1, subvol1_path)

    subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group)
    self.assertEqual(subvolpath2, subvol2_path)

    # ensure metadata file is in v2 location, with version retained as v2
    self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
    self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)

    # remove subvolumes
    self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
    self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_malicious_metafile_on_legacy_to_v1_upgrade(self):
    """
    Validate handcrafted .meta file on legacy subvol root doesn't break the system
    on legacy subvol upgrade to v1
    poor man's upgrade test -- theme continues...
    """
    subvol1, subvol2 = self._generate_random_subvolume_name(2)

    # emulate a old-fashioned subvolume in the default group
    createpath1 = os.path.join(".", "volumes", "_nogroup", subvol1)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)

    # add required xattrs to subvolume
    default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
    self.mount_a.setfattr(createpath1, 'ceph.dir.layout.pool', default_pool, sudo=True)

    # create v2 subvolume
    self._fs_cmd("subvolume", "create", self.volname, subvol2)

    # Create malicious .meta file in legacy subvolume root. Copy v2 subvolume
    # .meta into legacy subvol1's root
    subvol2_metapath = os.path.join(".", "volumes", "_nogroup", subvol2, ".meta")
    self.mount_a.run_shell(['sudo', 'cp', subvol2_metapath, createpath1], omit_sudo=False)

    # Upgrade legacy subvol1 to v1
    subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvol1)
    self.assertNotEqual(subvolpath1, None)
    subvolpath1 = subvolpath1.rstrip()

    # the subvolume path returned should not be of subvol2 from handcrafted
    # .meta file
    self.assertEqual(createpath1[1:], subvolpath1)

    # ensure metadata file is in legacy location, with required version v1
    self._assert_meta_location_and_version(self.volname, subvol1, version=1, legacy=True)

    # Authorize alice authID read-write access to subvol1. Verify it authorizes subvol1 path and not subvol2
    # path whose '.meta' file is copied to subvol1 root
    # NOTE(review): this assignment was lost in the source mangling; value
    # reconstructed from the "client.alice" assertions below -- confirm.
    authid1 = "alice"
    self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)

    # Validate that the mds path added is of subvol1 and not of subvol2
    out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
    self.assertEqual("client.alice", out[0]["entity"])
    self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])

    # remove subvolumes
    self._fs_cmd("subvolume", "rm", self.volname, subvol1)
    self._fs_cmd("subvolume", "rm", self.volname, subvol2)

    # verify trash dir is clean
    self._wait_for_trash_empty()
def test_binary_metafile_on_legacy_to_v1_upgrade(self):
    """
    Validate binary .meta file on legacy subvol root doesn't break the system
    on legacy subvol upgrade to v1
    poor man's upgrade test -- theme continues...
    """
    subvol = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume -- in a custom group
    createpath = os.path.join(".", "volumes", group, subvol)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

    # add required xattrs to subvolume
    default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
    self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

    # Create unparseable binary .meta file on legacy subvol's root
    meta_contents = os.urandom(4096)
    meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
    self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

    # Upgrade legacy subvol to v1
    subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
    self.assertNotEqual(subvolpath, None)
    subvolpath = subvolpath.rstrip()

    # The legacy subvolume path should be returned for subvol.
    # Should ignore unparseable binary .meta file in subvol's root
    self.assertEqual(createpath[1:], subvolpath)

    # ensure metadata file is in legacy location, with required version v1
    self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_unparseable_metafile_on_legacy_to_v1_upgrade(self):
    """
    Validate unparseable text .meta file on legacy subvol root doesn't break the system
    on legacy subvol upgrade to v1
    poor man's upgrade test -- theme continues...
    """
    subvol = self._generate_random_subvolume_name()
    group = self._generate_random_group_name()

    # emulate a old-fashioned subvolume -- in a custom group
    createpath = os.path.join(".", "volumes", group, subvol)
    self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)

    # add required xattrs to subvolume
    default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
    self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)

    # Create unparseable text .meta file on legacy subvol's root
    meta_contents = "unparseable config\nfile ...\nunparseable config\nfile ...\n"
    meta_filepath = os.path.join(self.mount_a.mountpoint, createpath, ".meta")
    self.mount_a.client_remote.write_file(meta_filepath, meta_contents, sudo=True)

    # Upgrade legacy subvol to v1
    subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvol, group)
    self.assertNotEqual(subvolpath, None)
    subvolpath = subvolpath.rstrip()

    # The legacy subvolume path should be returned for subvol.
    # Should ignore unparseable binary .meta file in subvol's root
    self.assertEqual(createpath[1:], subvolpath)

    # ensure metadata file is in legacy location, with required version v1
    self._assert_meta_location_and_version(self.volname, subvol, subvol_group=group, version=1, legacy=True)

    # remove subvolume
    self._fs_cmd("subvolume", "rm", self.volname, subvol, group)

    # verify trash dir is clean
    self._wait_for_trash_empty()

    # remove group
    self._fs_cmd("subvolumegroup", "rm", self.volname, group)