import json
import logging
import os
import time
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path,
           vol_prefix=vol_prefix, ns_prefix=ns_prefix))
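
    # A minimal usage sketch for the helper above (mirroring the tests below):
    # the script body runs remotely with `vc` already constructed and
    # connected, so a caller only writes the volume operations themselves:
    #
    #   self._volume_client_python(self.mount_b, dedent("""
    #       vp = VolumePath("grpid", "volid")
    #       vc.create_volume(vp, 10)
    #   """))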

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.

        Both perms and owner are passed directly to chmod.
        """
        remote.run(
            args=[
                'sudo', 'python', '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
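
    # The keyring written above is the output of `ceph auth get-or-create`,
    # which has the standard keyring format, e.g. (key abbreviated):
    #
    #   [client.manila]
    #       key = AQDLO...==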

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())
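
    # After this helper runs, the guest mount can authenticate as
    # client.<guest_entity> using the keyring written above; the tests then
    # attach it with e.g. guest_mount.mount(mount_path=mount_path).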

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                    self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.quota.max_bytes"),
                    "%s" % (volume_size * 1024 * 1024))
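
            # For example, with volume_size = 100 the xattr is expected to
            # read "104857600" (100 * 1024 * 1024 bytes), matching df's total.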

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            self.assertGreater(self.mount_a.df()['used'],
                               (data_bin_mb + other_bin_mb) * 1024 * 1024)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                    lambda: self.mounts[2].df()['used'],
                    data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                    lambda: self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.dir.rbytes"),
                    "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # sync so that file data are persisted to rados
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in the particular rados namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())
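
        # The same check can be made by hand with the rados CLI, e.g.:
        #   rados -p <pool_name> -N <namespace> ls
        # which should list the volume's data objects.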

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops
        # The way that the client currently behaves here is to block (it acts like
        # it has lost network, because there is nothing to tell it that its messages
        # are being dropped because its identity is gone)
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
        self.assertFalse(background.finished)

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].kill()
        self.mounts[2].kill_cleanup()
        try:
            background.wait()
        except CommandFailedError:
            # We killed the mount out from under you
            pass

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon pg warn max per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))
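
        # Worked example of the heuristic above: with 3 OSDs and the 300-PG
        # warning threshold set earlier, max_overall = 3 * 300 = 900; if the
        # existing pools already hold 100 PGs, the new pool is expected to
        # get (900 - 100) / 10 = 80 PGs.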

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
        # and mounts[3] would be used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which uses auth ID 'guest' and has mounted
        # one volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # Evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create the volume.
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip the leading "/" so the path is relative to the mount root.
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        # (assertItemsEqual compares the two strings as unordered sequences
        # of characters, which tolerates ordering differences in the printed
        # list.)
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest', is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print auth_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))

        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print volume_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that auth metadata file is cleaned up on removing
        # auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))