# ceph/qa/tasks/cephfs/test_volume_client.py

import json
import logging
import os
from textwrap import dedent

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)

class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))
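
    # Illustrative sketch (not part of the original tests): callers pass a
    # dedented script that runs between vc.connect() and vc.disconnect(), with
    # a ready CephFSVolumeClient bound to `vc`; e.g., assuming hypothetical
    # group/volume IDs:
    #
    #   self._volume_client_python(self.mount_b, dedent("""
    #       vp = VolumePath("grpid", "volid")
    #       vc.create_volume(vp, 10)
    #   """))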

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )
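
    # Illustrative note (assumption, not from the original file): the helper
    # above is roughly equivalent to running, on the remote host,
    #
    #   sudo python -c 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))' <path>
    #
    # with the file contents fed in on stdin, so the resulting file is created
    # by a root-owned process.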

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix)

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)
        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)
        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                    self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.quota.max_bytes"),
                    "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check that this space usage
            # is not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other).  This is a >
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                    lambda: self.mounts[2].df()['used'],
                    data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                    lambda: self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.dir.rbytes"),
                    "%s" % (data_bin_mb * 1024 * 1024), timeout=60)
        # sync so that file data are persisted to rados
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in a particular rados namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)
        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way that the client currently behaves here is to block (it acts as if
        # it has lost its network, because there is nothing to tell it that its
        # messages are being dropped because its identity is gone)
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLACKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_max_pg_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly
        # large numbers of PGs.
        self.set_conf("global", "mon max pg per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))
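
        # Worked example of the heuristic above (illustrative numbers, not
        # taken from a real run): with 3 OSDs and mon_max_pg_per_osd = 300,
        # max_overall = 3 * 300 = 900; if the existing pools already hold 100
        # PGs, the new data-isolated pool is expected to get
        # (900 - 100) / 10 = 80 PGs.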

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A; the file created from mount B
        # should be visible.
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
        # and mounts[3] would be used as guests to mount the volumes/shares.
        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which is using auth ID 'guest' and has mounted
        # one volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # Evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip the leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
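
        # For reference (derived from the filenames constructed above): with
        # these IDs the volume metadata file is "volumes/_groupid:volumeid.meta"
        # and the auth metadata file is "volumes/$guest.meta".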
        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 2,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 2,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))
        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

    def test_put_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_1'
        pool_name = self.fs.get_data_pool_names()[0]

        self._volume_client_python(vc_mount, dedent("""
            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
        self.assertEqual(obj_data, read_data)

    def test_get_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.get_object("{pool_name}", "{obj_name}")
            assert data_read == b"{obj_data}"
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

    def test_delete_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_3'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

        with self.assertRaises(CommandFailedError):
            self.fs.rados(['stat', obj_name], pool=pool_name)

        # Check idempotency -- no error raised trying to delete a non-existent
        # object.
        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

    def test_21501(self):
        """
        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
        """

        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Create an auth ID with no caps
        guest_id = '21501'
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'get-or-create', 'client.{0}'.format(guest_id))

        guest_mount = self.mounts[2]
        guest_mount.umount_wait()

        # Set auth caps for the auth ID using the volumeclient
        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)

        # Mount the volume in the guest using the auth ID to assert that the
        # auth caps are valid
        guest_mount.mount(mount_path=mount_path)

    def test_volume_without_namespace_isolation(self):
        """
        That volume client can create volumes that do not have separate RADOS
        namespace layouts.
        """
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        volume_prefix = "/myprefix"
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix)

        # The CephFS volume should be created
        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
        vol_namespace = self.mounts[0].getfattr(
            os.path.join("myprefix", group_id, volume_id),
            "ceph.dir.layout.pool_namespace")
        assert not vol_namespace

        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix)