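"""
ceph/qa/tasks/cephfs/test_volume_client.py

Tests for CephFSVolumeClient (the ceph_volume_client module consumed by
OpenStack Manila): volume create/delete/purge, guest authorization and
eviction, RADOS namespace and data-pool isolation, tenant isolation, and
recovery from partial auth metadata updates.
"""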
import json
import logging
import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # TODO: Test that VolumeClient can recover from partial auth updates.

    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
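        """
        Run the given script fragment on the remote, wrapped in boilerplate
        that builds a CephFSVolumeClient ("vc") and connects/disconnects it
        around the payload.
        """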
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix,
           ns_prefix=ns_prefix))

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
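        # The VolumeClient needs broad caps: it creates pools and sets
        # layouts, and manages auth IDs on behalf of guest clients.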
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """
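        # mount_path has the form <volume_prefix>/<group_id>/<volume_id>;
        # peel its components off from the right.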
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix)

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
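        """
        That volumes are created under the default volume prefix ("volumes")
        and get the default RADOS namespace prefix ("fsvolumens_") when no
        prefixes are passed to CephFSVolumeClient.
        """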
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """
        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            self.assertGreater(self.mount_a.df()['used'],
                               (data_bin_mb + other_bin_mb) * 1024 * 1024)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

            # sync so that file data are persisted to RADOS
            self.mounts[2].run_shell(["sync"])

        # Our data should stay in the particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())
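        # A non-empty listing confirms the file data landed in the volume's
        # dedicated namespace rather than the pool's default namespace.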

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops
        # The way that the client currently behaves here is to block (it acts like
        # it has lost network, because there is nothing to tell it that its messages
        # are being dropped because its identity is gone)
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
        self.assertFalse(background.finished)

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].kill()
        self.mounts[2].kill_cleanup()
        try:
            background.wait()
        except CommandFailedError:
            # We killed the mount out from under you
            pass

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon pg warn max per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))
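        # (i.e. the new pool is expected to take a tenth of the remaining PG
        # headroom across the cluster; see the pg_num assertion below)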

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
        # and mounts[3] would be used as guests to mount the volumes/shares.
        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)
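
        # Passing volume_path to evict() scopes the eviction to sessions that
        # used the auth ID for that specific volume, as exercised below.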

        # Evict the guest client, guest_mounts[0], which is using auth ID
        # 'guest' and has mounted the first volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # Evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It behaves as if it has lost network
        # connection.
        background = guest_mounts[0].write_n_mb("rogue.bin", 1, wait=False)
        # Approximate check for 'stuck' as 'still running after 10s'.
        time.sleep(10)
        self.assertFalse(background.finished)

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

        # We must hard-umount the one that we evicted
        guest_mounts[0].umount_wait(force=True)

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create the volume.
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip the leading "/" so the path is relative to mount_a's root.
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))

        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))

        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print auth_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))

        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print volume_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
        # Induce partial auth update state by modifying the auth metadata file,
        # and then run the recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))