import json
import logging
import os

from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def setUp(self):
        CephFSTestCase.setUp(self)
        self.py_version = self.ctx.config.get('overrides', {}).get('python', 'python')
        log.info("using python version: {python_version}".format(
            python_version=self.py_version
        ))

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from __future__ import print_function
import logging
from ceph_volume_client import CephFSVolumeClient, VolumePath

log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)

vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path,
           vol_prefix=vol_prefix, ns_prefix=ns_prefix),
           self.py_version)
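
    # A minimal usage sketch for the helper above (illustrative only; the
    # group and volume IDs are placeholders): the script body runs remotely
    # with a connected `vc` already in scope, so callers only supply the
    # CephFSVolumeClient calls under test, e.g.
    #
    #   out = self._volume_client_python(self.mount_b, dedent("""
    #       vp = VolumePath("grpid", "volid")
    #       print(vc.create_volume(vp, 10)['mount_path'])
    #   """))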

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )
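
    # The remote one-liner above relies on the Python 2 file() builtin, so it
    # assumes the remote `python` is a py2 interpreter. A py3-safe equivalent
    # (an illustrative sketch, not what this helper actually runs) would be:
    #
    #   'import shutil, sys; shutil.copyfileobj(sys.stdin.buffer, open(sys.argv[1], "wb"))'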

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print(auth_result['auth_key'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())
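
    # For reference, the keyring written for the guest has the shape of a
    # standard cephx keyring (illustrative, with a made-up key):
    #
    #   [client.guest]
    #       key = AQBexampleexampleexampleexamplekeyQ==
    #
    # plus the client section that the set_conf() calls above append to
    # ceph.conf.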

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # Namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)
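
    # The namespace check above can also be made from a shell on the global
    # mount (illustrative):
    #
    #   getfattr -n ceph.dir.layout.pool_namespace volumes/grpid/volid
    #
    # which should report "fsvolumens_volid" given the default prefixes used
    # in this test.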

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount
        # the volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other).  This is a >
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

            # sync so that file data are persisted to rados
            self.mounts[2].run_shell(["sync"])

            # Our data should stay in a particular rados namespace
            pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
            namespace = "{0}{1}".format(namespace_prefix, volume_id)
            ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
            self.assertEqual(namespace, ns_in_attr)

            objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
            self.assertNotEqual(objects_in_ns, set())

            # De-authorize the guest
            self._volume_client_python(self.mount_b, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.evict("{guest_entity}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                guest_entity=guest_entity
            )), volume_prefix, namespace_prefix)

            # Once deauthorized, the client should be unable to do any more metadata ops
            # The way that the client currently behaves here is to block (it acts like
            # it has lost network, because there is nothing to tell it that its messages
            # are being dropped because its identity is gone)
            background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
            try:
                background.wait()
            except CommandFailedError:
                # command failed with EBLACKLISTED?
                if "transport endpoint shutdown" in background.stderr.getvalue():
                    pass
                else:
                    raise

            # After deauthorisation, the client ID should be gone (this was the only
            # volume it was authorised for)
            self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

            # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
            self.mounts[2].umount_wait()

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_max_pg_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon max pg per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))
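
        # Worked example (illustrative numbers, not from a real run): with 3
        # OSDs and mon_max_pg_per_osd = 300, max_overall = 3 * 300 = 900. If
        # existing pools already hold 100 PGs, the heuristic above yields
        # expected_pg_num = (900 - 100) / 10 = 80, i.e. the new pool is sized
        # to a tenth of the remaining PG headroom.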

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
        # and mounts[3] would be used as guests to mount the volumes/shares.
        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print(create_result['mount_path'])
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict the guest client, guest_mounts[0], which is using auth ID
        # 'guest' and has mounted one volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # Evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip the leading "/" so the path can be joined below
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "groupid"
        volume_id = "volumeid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount
        # the volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        if self.py_version == 'python3':
            expected_result = [('guest1', 'rw'), ('guest2', 'r')]
        else:
            expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]

        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest', is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 1,
            "dirty": False,
            "tenant_id": "tenant1",
            "volumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            "version": 2,
            "compat_version": 1,
            "auths": {
                "guest": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)
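
        # Taken together, the two files checked above form the two-way mapping
        # described in this test's docstring: "$guest.meta" records, per auth
        # ID, the volumes it may access, while "_groupid:volumeid.meta"
        # records, per volume, the auth IDs that may access it.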

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that auth metadata file is cleaned up on removing
        # auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))
        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

    def test_put_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_1'
        pool_name = self.fs.get_data_pool_names()[0]

        self._volume_client_python(vc_mount, dedent("""
            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
        """.format(
            pool_name = pool_name,
            obj_name = obj_name,
            obj_data = obj_data
        )))

        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
        self.assertEqual(obj_data, read_data)

    def test_get_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.get_object("{pool_name}", "{obj_name}")
            assert data_read == b"{obj_data}"
        """.format(
            pool_name = pool_name,
            obj_name = obj_name,
            obj_data = obj_data
        )))

    def test_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        # Test if put_object_versioned() crosschecks the version of the
        # given object. Being a negative test, an exception is expected.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(vc_mount, dedent("""
                data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")
                # Rewrite the object with a plain put_object(); this bumps
                # the object's version in RADOS.
                vc.put_object("{pool_name}", "{obj_name}", data)
                # A write conditioned on the now-stale version must fail.
                vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
            """).format(pool_name=pool_name, obj_name=obj_name))

    def test_delete_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_3'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name = pool_name,
            obj_name = obj_name,
        )))

        with self.assertRaises(CommandFailedError):
            self.fs.rados(['stat', obj_name], pool=pool_name)

        # Check idempotency -- no error raised trying to delete non-existent
        # object
        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name = pool_name,
            obj_name = obj_name,
        )))

    def test_21501(self):
        """
        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
        """

        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Create an auth ID with no caps
        guest_id = '21501'
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'get-or-create', 'client.{0}'.format(guest_id))

        guest_mount = self.mounts[2]
        guest_mount.umount_wait()

        # Set auth caps for the auth ID using the volumeclient
        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)

        # Mount the volume in the guest using the auth ID to assert that the
        # auth caps are valid
        guest_mount.mount(mount_path=mount_path)

    def test_volume_without_namespace_isolation(self):
        """
        That volume client can create volumes that do not have separate RADOS
        namespace layouts.
        """
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        volume_prefix = "/myprefix"
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix)

        # The CephFS volume should be created
        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
        vol_namespace = self.mounts[0].getfattr(
            os.path.join("myprefix", group_id, volume_id),
            "ceph.dir.layout.pool_namespace")
        assert not vol_namespace

        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix)