X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=ceph%2Fqa%2Ftasks%2Fcephfs%2Ftest_volume_client.py;h=9be7fc2fff5575dced0f2e722ab0bacbf9629d99;hb=28e407b858acd3bddc89f68583571f771bb42e46;hp=f67b7f941a9655672e502990cb71d391d9b9cf78;hpb=7c673caec407dd16107e56e4b51a6d00f021315c;p=ceph.git

diff --git a/ceph/qa/tasks/cephfs/test_volume_client.py b/ceph/qa/tasks/cephfs/test_volume_client.py
index f67b7f941..9be7fc2ff 100644
--- a/ceph/qa/tasks/cephfs/test_volume_client.py
+++ b/ceph/qa/tasks/cephfs/test_volume_client.py
@@ -11,10 +11,6 @@ log = logging.getLogger(__name__)
 
 
 class TestVolumeClient(CephFSTestCase):
-    #
-    # TODO: Test that VolumeClient can recover from partial auth updates.
-    #
-
     # One for looking at the global filesystem, one for being
     # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4
@@ -234,13 +230,16 @@ vc.disconnect()
 
         # Write something outside volume to check this space usage is
         # not reported in the volume's DF.
-        other_bin_mb = 6
+        other_bin_mb = 8
         self.mount_a.write_n_mb("other.bin", other_bin_mb)
 
         # global: df should see all the writes (data + other).  This is a >
         # rather than a == because the global space used includes all pools
-        self.assertGreater(self.mount_a.df()['used'],
-                           (data_bin_mb + other_bin_mb) * 1024 * 1024)
+        def check_df():
+            used = self.mount_a.df()['used']
+            return used >= (other_bin_mb * 1024 * 1024)
+
+        self.wait_until_true(check_df, timeout=30)
 
         # Hack: do a metadata IO to kick rstats
         self.mounts[2].run_shell(["touch", "foo"])
@@ -284,21 +283,21 @@ vc.disconnect()
         # it has lost network, because there is nothing to tell it that its messages
         # are being dropped because its identity is gone)
         background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
-        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
-        self.assertFalse(background.finished)
+        try:
+            background.wait()
+        except CommandFailedError:
+            # command failed with EBLACKLISTED?
+            if "transport endpoint shutdown" in background.stderr.getvalue():
+                pass
+            else:
+                raise
         # After deauthorisation, the client ID should be gone (this was the only
         # volume it was authorised for)
         self.assertNotIn("client.{0}".format(guest_entity),
                          [e['entity'] for e in self.auth_list()])
 
         # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
-        self.mounts[2].kill()
-        self.mounts[2].kill_cleanup()
-        try:
-            background.wait()
-        except CommandFailedError:
-            # We killed the mount out from under you
-            pass
+        self.mounts[2].umount_wait()
 
         self._volume_client_python(self.mount_b, dedent("""
             vp = VolumePath("{group_id}", "{volume_id}")
@@ -344,6 +343,19 @@ vc.disconnect()
             vc.delete_volume(vp, data_isolated=True)
             vc.purge_volume(vp, data_isolated=True)
             vc.purge_volume(vp, data_isolated=True)
+
+            vc.create_volume(vp, 10, namespace_isolated=False)
+            vc.create_volume(vp, 10, namespace_isolated=False)
+            vc.authorize(vp, "{guest_entity}")
+            vc.authorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.delete_volume(vp)
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+            vc.purge_volume(vp)
         """.format(
             group_id=group_id,
             volume_id=volume_id,
@@ -356,11 +368,11 @@ vc.disconnect()
         :return:
         """
 
-        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
+        # Because the teuthology config template sets mon_max_pg_per_osd to
         # 10000 (i.e. it just tries to ignore health warnings), reset it to something
         # sane before using volume_client, to avoid creating pools with absurdly large
         # numbers of PGs.
-        self.set_conf("global", "mon pg warn max per osd", "300")
+        self.set_conf("global", "mon max pg per osd", "300")
         for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
             mon_daemon_state.restart()
 
@@ -369,7 +381,7 @@ vc.disconnect()
 
         # Calculate how many PGs we'll expect the new volume pool to have
         osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
-        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
+        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
         osd_count = len(osd_map['osds'])
         max_overall = osd_count * max_per_osd
 
@@ -494,12 +506,17 @@ vc.disconnect()
         )))
 
         # Evicted guest client, guest_mounts[0], should not be able to do
-        # any more metadata ops.  It behaves as if it has lost network
-        # connection.
-        background = guest_mounts[0].write_n_mb("rogue.bin", 1, wait=False)
-        # Approximate check for 'stuck' as 'still running after 10s'.
-        time.sleep(10)
-        self.assertFalse(background.finished)
+        # any more metadata ops.  It should start failing all operations
+        # when it sees that its own address is in the blacklist.
+        try:
+            guest_mounts[0].write_n_mb("rogue.bin", 1)
+        except CommandFailedError:
+            pass
+        else:
+            raise RuntimeError("post-eviction write should have failed!")
+
+        # The blacklisted guest client should now be unmountable
+        guest_mounts[0].umount_wait()
 
         # Guest client, guest_mounts[1], using the same auth ID 'guest', but
         # has mounted the other volume, should be able to use its volume
@@ -519,8 +536,6 @@ vc.disconnect()
             guest_entity=guest_entity
         )))
 
-        # We must hard-umount the one that we evicted
-        guest_mounts[0].umount_wait(force=True)
 
     def test_purge(self):
         """
@@ -762,52 +777,63 @@ vc.disconnect()
         # auth ID belongs to, the auth ID's authorized access levels
         # for different volumes, versioning details, etc.
         expected_auth_metadata = {
-            u"version": 1,
-            u"compat_version": 1,
-            u"dirty": False,
-            u"tenant_id": u"tenant1",
-            u"volumes": {
-                u"groupid/volumeid": {
-                    u"dirty": False,
-                    u"access_level": u"rw",
+            "version": 2,
+            "compat_version": 1,
+            "dirty": False,
+            "tenant_id": u"tenant1",
+            "volumes": {
+                "groupid/volumeid": {
+                    "dirty": False,
+                    "access_level": u"rw",
                 }
             }
         }
 
         auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
+            import json
             vp = VolumePath("{group_id}", "{volume_id}")
             auth_metadata = vc._auth_metadata_get("{auth_id}")
-            print auth_metadata
+            print(json.dumps(auth_metadata))
         """.format(
             group_id=group_id,
            volume_id=volume_id,
             auth_id=guestclient_1["auth_id"],
         )))
+        auth_metadata = json.loads(auth_metadata)
 
-        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)
+        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
+        del expected_auth_metadata["version"]
+        del auth_metadata["version"]
+        self.assertEqual(expected_auth_metadata, auth_metadata)
 
         # Verify that the volume metadata file stores info about auth IDs
         # and their access levels to the volume, versioning details, etc.
         expected_vol_metadata = {
-            u"version": 1,
-            u"compat_version": 1,
-            u"auths": {
-                u"guest": {
-                    u"dirty": False,
-                    u"access_level": u"rw"
+            "version": 2,
+            "compat_version": 1,
+            "auths": {
+                "guest": {
+                    "dirty": False,
+                    "access_level": u"rw"
                 }
             }
         }
 
         vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
+            import json
             vp = VolumePath("{group_id}", "{volume_id}")
             volume_metadata = vc._volume_metadata_get(vp)
-            print volume_metadata
+            print(json.dumps(volume_metadata))
         """.format(
             group_id=group_id,
             volume_id=volume_id,
         )))
-        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)
+        vol_metadata = json.loads(vol_metadata)
+
+        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
+        del expected_vol_metadata["version"]
+        del vol_metadata["version"]
+        self.assertEqual(expected_vol_metadata, vol_metadata)
 
         # Cannot authorize 'guestclient_2' to access the volume.
         # It uses auth ID 'guest', which has already been used by a
@@ -903,3 +929,152 @@
             volume_id=volume_id,
             auth_id=guestclient["auth_id"],
         )))
+
+    def test_put_object(self):
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+        self._configure_vc_auth(vc_mount, "manila")
+
+        obj_data = 'test data'
+        obj_name = 'test_vc_obj_1'
+        pool_name = self.fs.get_data_pool_names()[0]
+
+        self._volume_client_python(vc_mount, dedent("""
+            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
+        """.format(
+            pool_name = pool_name,
+            obj_name = obj_name,
+            obj_data = obj_data
+        )))
+
+        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
+        self.assertEqual(obj_data, read_data)
+
+    def test_get_object(self):
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+        self._configure_vc_auth(vc_mount, "manila")
+
+        obj_data = 'test_data'
+        obj_name = 'test_vc_ob_2'
+        pool_name = self.fs.get_data_pool_names()[0]
+
+        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
+
+        self._volume_client_python(vc_mount, dedent("""
+            data_read = vc.get_object("{pool_name}", "{obj_name}")
+            assert data_read == b"{obj_data}"
+        """.format(
+            pool_name = pool_name,
+            obj_name = obj_name,
+            obj_data = obj_data
+        )))
+
+    def test_delete_object(self):
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+        self._configure_vc_auth(vc_mount, "manila")
+
+        obj_data = 'test data'
+        obj_name = 'test_vc_obj_3'
+        pool_name = self.fs.get_data_pool_names()[0]
+
+        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
+
+        self._volume_client_python(vc_mount, dedent("""
+            data_read = vc.delete_object("{pool_name}", "{obj_name}")
+        """.format(
+            pool_name = pool_name,
+            obj_name = obj_name,
+        )))
+
+        with self.assertRaises(CommandFailedError):
+            self.fs.rados(['stat', obj_name], pool=pool_name)
+
+        # Check idempotency -- no error raised trying to delete non-existent
+        # object
+        self._volume_client_python(vc_mount, dedent("""
+            data_read = vc.delete_object("{pool_name}", "{obj_name}")
+        """.format(
+            pool_name = pool_name,
+            obj_name = obj_name,
+        )))
+
+    def test_21501(self):
+        """
+        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
+        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
+        """
+
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+
+        # Configure vc_mount as the handle for driving volumeclient
+        self._configure_vc_auth(vc_mount, "manila")
+
+        # Create a volume
+        group_id = "grpid"
+        volume_id = "volid"
+        mount_path = self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            create_result = vc.create_volume(vp, 1024*1024*10)
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id
+        )))
+
+        # Create an auth ID with no caps
+        guest_id = '21501'
+        self.fs.mon_manager.raw_cluster_cmd_result(
+            'auth', 'get-or-create', 'client.{0}'.format(guest_id))
+
+        guest_mount = self.mounts[2]
+        guest_mount.umount_wait()
+
+        # Set auth caps for the auth ID using the volumeclient
+        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)
+
+        # Mount the volume in the guest using the auth ID to assert that the
+        # auth caps are valid
+        guest_mount.mount(mount_path=mount_path)
+
+    def test_volume_without_namespace_isolation(self):
+        """
+        That volume client can create volumes that do not have separate RADOS
+        namespace layouts.
+        """
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+
+        # Configure vc_mount as the handle for driving volumeclient
+        self._configure_vc_auth(vc_mount, "manila")
+
+        # Create a volume
+        volume_prefix = "/myprefix"
+        group_id = "grpid"
+        volume_id = "volid"
+        mount_path = self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id
+        )), volume_prefix)
+
+        # The CephFS volume should be created
+        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
+        vol_namespace = self.mounts[0].getfattr(
+            os.path.join("myprefix", group_id, volume_id),
+            "ceph.dir.layout.pool_namespace")
+        assert not vol_namespace
+
+        self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )), volume_prefix)
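
Note on the new object-interface tests (test_put_object / test_get_object /
test_delete_object): below is a minimal standalone sketch of the calls they
exercise, assuming the luminous-era ceph_volume_client module. The constructor
arguments (auth ID, ceph.conf path, cluster name) and the pool name are
placeholder assumptions, not values taken from the patch.

    # Sketch only: round-trip a whole RADOS object through CephFSVolumeClient,
    # mirroring what the tests above drive via _volume_client_python().
    from ceph_volume_client import CephFSVolumeClient

    # Assumed arguments: auth ID, conf path, cluster name.
    vc = CephFSVolumeClient("manila", "/etc/ceph/ceph.conf", "ceph")
    vc.connect()
    try:
        # put_object() writes the full object; get_object() reads it back.
        vc.put_object("cephfs_data", "test_vc_obj_1", b"test data")
        assert vc.get_object("cephfs_data", "test_vc_obj_1") == b"test data"

        # delete_object() is idempotent: deleting an already-deleted object
        # raises no error, which is what test_delete_object asserts.
        vc.delete_object("cephfs_data", "test_vc_obj_1")
        vc.delete_object("cephfs_data", "test_vc_obj_1")
    finally:
        vc.disconnect()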