import json
import logging
-import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
+from teuthology.misc import sudo_write_file
log = logging.getLogger(__name__)
# One for looking at the global filesystem, one for being
# the VolumeClient, two for mounting the created shares
CLIENTS_REQUIRED = 4
+ default_py_version = 'python3'
+
+    def setUp(self):
+        """Standard CephFS test-case setup, plus python-interpreter selection.
+
+        The interpreter used to run volume-client payload scripts on the
+        remote nodes can be overridden through the teuthology config and
+        defaults to TestVolumeClient.default_py_version.
+        """
+        CephFSTestCase.setUp(self)
+        # NOTE(review): the override key looked up here is 'python3' --
+        # confirm this matches the key teuthology actually places under
+        # ctx.config['overrides'].
+        self.py_version = self.ctx.config.get('overrides', {}).\
+            get('python3', TestVolumeClient.default_py_version)
+        log.info("using python version: {python_version}".format(
+            python_version=self.py_version
+        ))
def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
# Can't dedent this *and* the script we pass in, because they might have different
if ns_prefix:
ns_prefix = "\"" + ns_prefix + "\""
return client.run_python("""
+from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
+from sys import version_info as sys_version_info
+from rados import OSError as rados_OSError
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
vc.connect()
{payload}
vc.disconnect()
- """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))
-
- def _sudo_write_file(self, remote, path, data):
- """
- Write data to a remote file as super user
-
- :param remote: Remote site.
- :param path: Path on the remote being written to.
- :param data: Data to be written.
-
- Both perms and owner are passed directly to chmod.
- """
- remote.run(
- args=[
- 'sudo',
- 'python',
- '-c',
- 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
- path,
- ],
- stdin=data,
- )
+ """.format(payload=script, conf_path=client.config_path,
+ vol_prefix=vol_prefix, ns_prefix=ns_prefix),
+ self.py_version)
def _configure_vc_auth(self, mount, id_name):
"""
"mon", "allow *"
)
mount.client_id = id_name
- self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
+ sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
def _configure_guest_auth(self, volumeclient_mount, guest_mount,
vp = VolumePath("{group_id}", "{volume_id}")
auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
tenant_id="{tenant_id}")
- print auth_result['auth_key']
+ print(auth_result['auth_key'])
""".format(
group_id=group_id,
volume_id=volume_id,
key=key
))
guest_mount.client_id = guest_entity
- self._sudo_write_file(guest_mount.client_remote,
- guest_mount.get_keyring_path(),
- keyring_txt)
+ sudo_write_file(guest_mount.client_remote,
+ guest_mount.get_keyring_path(), keyring_txt)
# Add a guest client section to the ceph config file.
self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
mount_path = self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*{volume_size})
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id,
:return:
"""
- # Because the teuthology config template sets mon_max_pg_per_osd to
- # 10000 (i.e. it just tries to ignore health warnings), reset it to something
- # sane before using volume_client, to avoid creating pools with absurdly large
- # numbers of PGs.
- self.set_conf("global", "mon max pg per osd", "300")
- for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
- mon_daemon_state.restart()
-
self.mount_b.umount_wait()
self._configure_vc_auth(self.mount_b, "manila")
- # Calculate how many PGs we'll expect the new volume pool to have
- osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
- max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
- osd_count = len(osd_map['osds'])
- max_overall = osd_count * max_per_osd
-
- existing_pg_count = 0
- for p in osd_map['pools']:
- existing_pg_count += p['pg_num']
-
- expected_pg_num = (max_overall - existing_pg_count) / 10
- log.info("max_per_osd {0}".format(max_per_osd))
- log.info("osd_count {0}".format(osd_count))
- log.info("max_overall {0}".format(max_overall))
- log.info("existing_pg_count {0}".format(existing_pg_count))
- log.info("expected_pg_num {0}".format(expected_pg_num))
-
pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
group_id = "grpid"
volume_id = "volid"
self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
- vc.create_volume(vp, 10, data_isolated=True)
+ vc.create_volume(vp, data_isolated=True)
""".format(
group_id=group_id,
volume_id=volume_id,
new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
self.assertEqual(len(new_pools), 1)
- # It should have followed the heuristic for PG count
- # (this is an overly strict test condition, so we may want to remove
- # it at some point as/when the logic gets fancier)
- created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
- self.assertEqual(expected_pg_num, created_pg_num)
-
def test_15303(self):
"""
Reproducer for #15303 "Client holds incorrect complete flag on dir
m.umount_wait()
# Create a dir on mount A
- self.mount_a.mount()
+ self.mount_a.mount_wait()
self.mount_a.run_shell(["mkdir", "parent1"])
self.mount_a.run_shell(["mkdir", "parent2"])
self.mount_a.run_shell(["mkdir", "parent1/mydir"])
# Put some files in it from mount B
- self.mount_b.mount()
+ self.mount_b.mount_wait()
self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
self.mount_b.umount_wait()
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 10 * 1024 * 1024)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_ids[i]
mount_path = self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", u"{volume_id}")
create_result = vc.create_volume(vp, 10)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id
mount_path = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id,
# Read existing content of the volume.
self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
# Cannot write into read-only volume.
- with self.assertRaises(CommandFailedError):
+ try:
guest_mount.write_n_mb("rogue.bin", 1)
+ except CommandFailedError:
+ pass
def test_get_authorized_ids(self):
"""
guest_entity_1 = "guest1"
guest_entity_2 = "guest2"
- log.info("print group ID: {0}".format(group_id))
+ log.info("print(group ID: {0})".format(group_id))
# Create a volume.
auths = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.create_volume(vp, 1024*1024*10)
auths = vc.get_authorized_ids(vp)
- print auths
+ print(auths)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
# Check the list of authorized IDs for the volume.
- expected_result = None
- self.assertEqual(str(expected_result), auths)
+ self.assertEqual('None', auths)
# Allow two auth IDs access to the volume.
auths = self._volume_client_python(volumeclient_mount, dedent("""
vc.authorize(vp, "{guest_entity_1}", readonly=False)
vc.authorize(vp, "{guest_entity_2}", readonly=True)
auths = vc.get_authorized_ids(vp)
- print auths
+ print(auths)
""".format(
group_id=group_id,
volume_id=volume_id,
guest_entity_2=guest_entity_2,
)))
# Check the list of authorized IDs and their access levels.
- expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
- self.assertItemsEqual(str(expected_result), auths)
+ if self.py_version == 'python3':
+ expected_result = [('guest1', 'rw'), ('guest2', 'r')]
+ self.assertCountEqual(str(expected_result), auths)
+ else:
+ expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
+ self.assertItemsEqual(str(expected_result), auths)
# Disallow both the auth IDs' access to the volume.
auths = self._volume_client_python(volumeclient_mount, dedent("""
vc.deauthorize(vp, "{guest_entity_1}")
vc.deauthorize(vp, "{guest_entity_2}")
auths = vc.get_authorized_ids(vp)
- print auths
+ print(auths)
""".format(
group_id=group_id,
volume_id=volume_id,
guest_entity_2=guest_entity_2,
)))
# Check the list of authorized IDs for the volume.
- expected_result = None
- self.assertItemsEqual(str(expected_result), auths)
+ self.assertEqual('None', auths)
def test_multitenant_volumes(self):
"""
"version": 2,
"compat_version": 1,
"dirty": False,
- "tenant_id": u"tenant1",
+ "tenant_id": "tenant1",
"volumes": {
"groupid/volumeid": {
"dirty": False,
- "access_level": u"rw",
+ "access_level": "rw"
}
}
}
"auths": {
"guest": {
"dirty": False,
- "access_level": u"rw"
+ "access_level": "rw"
}
}
}
obj_data = obj_data
)))
+ def test_put_object_versioned(self):
+ vc_mount = self.mounts[1]
+ vc_mount.umount_wait()
+ self._configure_vc_auth(vc_mount, "manila")
+
+ obj_data = 'test_data'
+ obj_name = 'test_vc_obj'
+ pool_name = self.fs.get_data_pool_names()[0]
+ self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
+
+ self._volume_client_python(vc_mount, dedent("""
+ data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}")
+
+ if sys_version_info.major < 3:
+ data = data + 'modification1'
+ elif sys_version_info.major > 3:
+ data = str.encode(data.decode() + 'modification1')
+
+ vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before)
+ data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}")
+ assert version_after == version_before + 1
+ """).format(pool_name=pool_name, obj_name=obj_name))
+
+ def test_version_check_for_put_object_versioned(self):
+ vc_mount = self.mounts[1]
+ vc_mount.umount_wait()
+ self._configure_vc_auth(vc_mount, "manila")
+
+ obj_data = 'test_data'
+ obj_name = 'test_vc_ob_2'
+ pool_name = self.fs.get_data_pool_names()[0]
+ self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
+
+ # Test if put_object_versioned() crosschecks the version of the
+ # given object. Being a negative test, an exception is expected.
+ expected_exception = 'rados_OSError'
+ output = self._volume_client_python(vc_mount, dedent("""
+ data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")
+
+ if sys_version_info.major < 3:
+ data = data + 'm1'
+ elif sys_version_info.major > 3:
+ data = str.encode(data.decode('utf-8') + 'm1')
+
+ vc.put_object("{pool_name}", "{obj_name}", data)
+
+ if sys_version_info.major < 3:
+ data = data + 'm2'
+ elif sys_version_info.major > 3:
+ data = str.encode(data.decode('utf-8') + 'm2')
+
+ try:
+ vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
+ except {expected_exception}:
+ print('{expected_exception} raised')
+ """).format(pool_name=pool_name, obj_name=obj_name,
+ expected_exception=expected_exception))
+ self.assertEqual(expected_exception + ' raised', output)
+
+
def test_delete_object(self):
vc_mount = self.mounts[1]
vc_mount.umount_wait()
mount_path = self._volume_client_python(vc_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id
volume_prefix = "/myprefix"
group_id = "grpid"
volume_id = "volid"
- mount_path = self._volume_client_python(vc_mount, dedent("""
+ self._volume_client_python(vc_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id