# ceph/qa/tasks/cephfs/test_volume_client.py

import json
import logging
import time
import os
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path,
           vol_prefix=vol_prefix, ns_prefix=ns_prefix))

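    # The helper above returns whatever the remote script prints; the tests
    # below rely on this to get mount paths, auth keys and metadata back on
    # the test side. A minimal, illustrative invocation:
    #
    #   self._volume_client_python(self.mount_b, dedent("""
    #       vp = VolumePath("grpid", "volid")
    #       vc.create_volume(vp, 10)
    #   """))
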
    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix)

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

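    # For reference, the keyring file written for the guest above renders to
    # something like the following (the key value is illustrative):
    #
    #   [client.guest]
    #       key = AQD7...
    #
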
    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                    self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.quota.max_bytes"),
                    "%s" % (volume_size * 1024 * 1024))

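            # Sanity of the arithmetic above: with volume_size = 100, the
            # quota xattr should read 100 * 1024 * 1024 = 104857600 bytes.
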
            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                    lambda: self.mounts[2].df()['used'],
                    data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                    lambda: self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.dir.rbytes"),
                    "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # sync so that file data are persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in a particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way the client currently behaves here is to block (it acts as if it
        # has lost the network, because there is nothing to tell it that its
        # messages are being dropped because its identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
        self.assertFalse(background.finished)

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].kill()
        self.mounts[2].kill_cleanup()
        try:
            background.wait()
        except CommandFailedError:
            # We killed the mount out from under you
            pass

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon pg warn max per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))

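        # Worked example with hypothetical numbers: 3 OSDs at a warn ceiling
        # of 300 PGs each give max_overall = 900; if existing pools already
        # hold 100 PGs, the heuristic yields (900 - 100) / 10 = 80 PGs for
        # the new pool.
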
        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        #  it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        self.mount_a.mount()
        self.mount_b.mount()

        # Create a dir on mount A
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
        # and mounts[3] would be used as guests to mount the volumes/shares.
        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which is using auth ID 'guest' and has
        # mounted the first volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], using the same auth ID 'guest' but
        # mounted on the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create the volume
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip the leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that the volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print auth_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))

        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print volume_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing the
        # auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))
        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))