subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)
# mode
- self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'chmod', mode, subvolpath], omit_sudo=False)
# ownership
- self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
- self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'chown', uid, subvolpath], omit_sudo=False)
+ self.mount_a.run_shell(['sudo', 'chgrp', gid, subvolpath], omit_sudo=False)
def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
# flip ownership to nobody. assumption: nobody's id is 65534
- self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)
+ self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)
def _wait_for_trash_empty(self, timeout=60):
# XXX: construct the trash dir path (note that there is no mgr
group = subvol_group if subvol_group is not None else '_nogroup'
metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")
- out = self.mount_a.run_shell(['cat', metapath], sudo=True)
+ out = self.mount_a.run_shell(['sudo', 'cat', metapath], omit_sudo=False)
lines = out.stdout.getvalue().strip().split('\n')
sv_version = -1
for line in lines:
basepath = os.path.join("volumes", group, subvol_name)
uuid_str = str(uuid.uuid4())
createpath = os.path.join(basepath, uuid_str)
- self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
# create a v1 snapshot, to prevent auto upgrades
if has_snapshot:
snappath = os.path.join(createpath, ".snap", "fake")
- self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', snappath], omit_sudo=False)
# add required xattrs to subvolume
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
group = subvol_group if subvol_group is not None else '_nogroup'
trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
if create:
- self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', trashpath], omit_sudo=False)
else:
- self.mount_a.run_shell(['rmdir', trashpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'rmdir', trashpath], omit_sudo=False)
def _configure_guest_auth(self, guest_mount, authid, key):
"""
# emulate a old-fashioned subvolume -- in a custom group
createpath1 = os.path.join(".", "volumes", group, subvolume)
- self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
# this would auto-upgrade on access without anyone noticing
subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
# emulate a old-fashioned subvolume in a custom group
createpath = os.path.join(".", "volumes", group, subvolume)
- self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
# add required xattrs to subvolume
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
# Induce partial auth update state by modifying the auth metadata file,
# and then run authorize again.
- guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+ guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
# Authorize 'guestclient_1' to access the subvolume.
self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
# Induce partial auth update state by modifying the auth metadata file,
# and then run de-authorize.
- guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+ guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
# Deauthorize 'guestclient_1' to access the subvolume2.
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
# Replace 'subvolumes' to 'volumes', old style auth-metadata file
- guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+ guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
# Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
# Replace 'subvolumes' to 'volumes', old style auth-metadata file
- guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+ guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
# Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
# emulate a old-fashioned subvolume in a custom group
createpath = os.path.join(".", "volumes", group, subvolname)
- self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
# set metadata for subvolume.
key = "key"
# emulate a old-fashioned subvolume in a custom group
createpath = os.path.join(".", "volumes", group, subvolname)
- self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
# set metadata for subvolume.
input_metadata_dict = {f'key_{i}' : f'value_{i}' for i in range(3)}
# Create snapshot at ancestral level
ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
- self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
self.assertEqual(len(subvolsnapshotls), snap_count)
# remove ancestral snapshots
- self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)
+ self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
# remove snapshot
for snapshot in snapshots:
# Create snapshot at ancestral level
ancestral_snap_name = "ancestral_snap_1"
ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
- self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
# Validate existence of inherited snapshot
group_path = os.path.join(".", "volumes", group)
self.fail("expected snapshot info of inherited snapshot to fail")
# remove ancestral snapshots
- self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
+ self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
# Create snapshot at ancestral level
ancestral_snap_name = "ancestral_snap_1"
ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
- self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
# Validate existence of inherited snap
group_path = os.path.join(".", "volumes", group)
self.fail("expected removing inheirted snapshot to fail")
# remove ancestral snapshots
- self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
+ self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
# Create subvolumegroup snapshot
group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
- self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path], omit_sudo=False)
# Validate existence of subvolumegroup snapshot
self.mount_a.run_shell(['ls', group_snapshot_path])
self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
# remove subvolumegroup snapshot
- self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)
+ self.mount_a.run_shell(['sudo', 'rmdir', group_snapshot_path], omit_sudo=False)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
def test_subvolume_snapshot_info_without_snapshot_clone(self):
"""
- Verify subvolume snapshot info output without clonnnig snapshot.
+ Verify subvolume snapshot info output without cloning snapshot.
If no clone is performed then path /volumes/_index/clone/{track_id}
will not exist.
"""
# remove snapshot from backend to force the clone failure.
snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
- self.mount_a.run_shell(['rmdir', snappath], sudo=True)
+ self.mount_a.run_shell(['sudo', 'rmdir', snappath], omit_sudo=False)
# wait for clone1 to fail.
self._wait_for_clone_to_fail(clone1)
# emulate a old-fashioned subvolume
createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
- self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)
+ self.mount_a.run_shell_payload(f"sudo mkdir -p -m 777 {createpath}", omit_sudo=False)
# add required xattrs to subvolume
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
# emulate a old-fashioned subvolume -- one in the default group and
# the other in a custom group
createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
- self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
# create group
createpath2 = os.path.join(".", "volumes", group, subvolume2)
- self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)
+ self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)
# this would auto-upgrade on access without anyone noticing
subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
# remove group
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+class TestPerModuleFinsherThread(TestVolumesHelper):
+ """
+ Per module finisher thread tests related to mgr/volume cmds.
+ This is used in conjunction with check_counter with min val being 4
+ as four subvolume cmds are run
+ """
+ def test_volumes_module_finisher_thread(self):
+ subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3)
+ group = self._generate_random_group_name()
+
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+
+ # create subvolumes in group
+ self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group)
+ self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group)
+ self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group)
+
+ self._fs_cmd("subvolume", "rm", self.volname, subvol1, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvol2, group)
+ self._fs_cmd("subvolume", "rm", self.volname, subvol3, group)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()