]>
git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_volume_client.py
3e6c7d63b72f5fa48476249b287486aae9806e8f
4 from textwrap
import dedent
5 from tasks
.cephfs
.cephfs_test_case
import CephFSTestCase
6 from tasks
.cephfs
.fuse_mount
import FuseMount
7 from teuthology
.exceptions
import CommandFailedError
8 from teuthology
.misc
import sudo_write_file
10 log
= logging
.getLogger(__name__
)
13 class TestVolumeClient(CephFSTestCase
):
14 # One for looking at the global filesystem, one for being
15 # the VolumeClient, two for mounting the created shares
17 default_py_version
= 'python3'
20 CephFSTestCase
.setUp(self
)
21 self
.py_version
= self
.ctx
.config
.get('overrides', {}).\
22 get('python3', TestVolumeClient
.default_py_version
)
23 log
.info("using python version: {python_version}".format(
24 python_version
=self
.py_version
27 def _volume_client_python(self
, client
, script
, vol_prefix
=None, ns_prefix
=None):
28 # Can't dedent this *and* the script we pass in, because they might have different
29 # levels of indentation to begin with, so leave this string zero-indented
31 vol_prefix
= "\"" + vol_prefix
+ "\""
33 ns_prefix
= "\"" + ns_prefix
+ "\""
34 return client
.run_python("""
35 from __future__ import print_function
36 from ceph_volume_client import CephFSVolumeClient, VolumePath
37 from sys import version_info as sys_version_info
38 from rados import OSError as rados_OSError
40 log = logging.getLogger("ceph_volume_client")
41 log.addHandler(logging.StreamHandler())
42 log.setLevel(logging.DEBUG)
43 vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
47 """.format(payload
=script
, conf_path
=client
.config_path
,
48 vol_prefix
=vol_prefix
, ns_prefix
=ns_prefix
),
51 def _configure_vc_auth(self
, mount
, id_name
):
53 Set up auth credentials for the VolumeClient user
55 out
= self
.fs
.mon_manager
.raw_cluster_cmd(
56 "auth", "get-or-create", "client.{name}".format(name
=id_name
),
61 mount
.client_id
= id_name
62 sudo_write_file(mount
.client_remote
, mount
.get_keyring_path(), out
)
63 self
.set_conf("client.{name}".format(name
=id_name
), "keyring", mount
.get_keyring_path())
65 def _configure_guest_auth(self
, volumeclient_mount
, guest_mount
,
66 guest_entity
, mount_path
,
67 namespace_prefix
=None, readonly
=False,
68 tenant_id
=None, allow_existing_id
=False):
70 Set up auth credentials for the guest client to mount a volume.
72 :param volumeclient_mount: mount used as the handle for driving
74 :param guest_mount: mount used by the guest client.
75 :param guest_entity: auth ID used by the guest client.
76 :param mount_path: path of the volume.
77 :param namespace_prefix: name prefix of the RADOS namespace, which
78 is used for the volume's layout.
79 :param readonly: defaults to False. If set to 'True' only read-only
80 mount access is granted to the guest.
81 :param tenant_id: (OpenStack) tenant ID of the guest client.
84 head
, volume_id
= os
.path
.split(mount_path
)
85 head
, group_id
= os
.path
.split(head
)
86 head
, volume_prefix
= os
.path
.split(head
)
87 volume_prefix
= "/" + volume_prefix
89 # Authorize the guest client's auth ID to mount the volume.
90 key
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
91 vp = VolumePath("{group_id}", "{volume_id}")
92 auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
93 tenant_id="{tenant_id}",
94 allow_existing_id="{allow_existing_id}")
95 print(auth_result['auth_key'])
99 guest_entity
=guest_entity
,
102 allow_existing_id
=allow_existing_id
)), volume_prefix
, namespace_prefix
105 # CephFSVolumeClient's authorize() does not return the secret
106 # key to a caller who isn't multi-tenant aware. Explicitly
107 # query the key for such a client.
109 key
= self
.fs
.mon_manager
.raw_cluster_cmd(
110 "auth", "get-key", "client.{name}".format(name
=guest_entity
),
113 # The guest auth ID should exist.
114 existing_ids
= [a
['entity'] for a
in self
.auth_list()]
115 self
.assertIn("client.{0}".format(guest_entity
), existing_ids
)
117 # Create keyring file for the guest client.
118 keyring_txt
= dedent("""
119 [client.{guest_entity}]
123 guest_entity
=guest_entity
,
126 guest_mount
.client_id
= guest_entity
127 sudo_write_file(guest_mount
.client_remote
,
128 guest_mount
.get_keyring_path(), keyring_txt
)
130 # Add a guest client section to the ceph config file.
131 self
.set_conf("client.{0}".format(guest_entity
), "client quota", "True")
132 self
.set_conf("client.{0}".format(guest_entity
), "debug client", "20")
133 self
.set_conf("client.{0}".format(guest_entity
), "debug objecter", "20")
134 self
.set_conf("client.{0}".format(guest_entity
),
135 "keyring", guest_mount
.get_keyring_path())
137 def test_default_prefix(self
):
140 DEFAULT_VOL_PREFIX
= "volumes"
141 DEFAULT_NS_PREFIX
= "fsvolumens_"
143 self
.mount_b
.umount_wait()
144 self
._configure
_vc
_auth
(self
.mount_b
, "manila")
146 #create a volume with default prefix
147 self
._volume
_client
_python
(self
.mount_b
, dedent("""
148 vp = VolumePath("{group_id}", "{volume_id}")
149 vc.create_volume(vp, 10, data_isolated=True)
155 # The dir should be created
156 self
.mount_a
.stat(os
.path
.join(DEFAULT_VOL_PREFIX
, group_id
, volume_id
))
158 #namespace should be set
159 ns_in_attr
= self
.mount_a
.getfattr(os
.path
.join(DEFAULT_VOL_PREFIX
, group_id
, volume_id
), "ceph.dir.layout.pool_namespace")
160 namespace
= "{0}{1}".format(DEFAULT_NS_PREFIX
, volume_id
)
161 self
.assertEqual(namespace
, ns_in_attr
)
164 def test_lifecycle(self
):
166 General smoke test for create, extend, destroy
169 # I'm going to use mount_c later as a guest for mounting the created
171 self
.mounts
[2].umount_wait()
173 # I'm going to leave mount_b unmounted and just use it as a handle for
174 # driving volumeclient. It's a little hacky but we don't have a more
175 # general concept for librados/libcephfs clients as opposed to full
176 # blown mounting clients.
177 self
.mount_b
.umount_wait()
178 self
._configure
_vc
_auth
(self
.mount_b
, "manila")
180 guest_entity
= "guest"
184 volume_prefix
= "/myprefix"
185 namespace_prefix
= "mynsprefix_"
187 # Create a 100MB volume
189 mount_path
= self
._volume
_client
_python
(self
.mount_b
, dedent("""
190 vp = VolumePath("{group_id}", "{volume_id}")
191 create_result = vc.create_volume(vp, 1024*1024*{volume_size})
192 print(create_result['mount_path'])
196 volume_size
=volume_size
197 )), volume_prefix
, namespace_prefix
)
199 # The dir should be created
200 self
.mount_a
.stat(os
.path
.join("myprefix", group_id
, volume_id
))
202 # Authorize and configure credentials for the guest to mount the
204 self
._configure
_guest
_auth
(self
.mount_b
, self
.mounts
[2], guest_entity
,
205 mount_path
, namespace_prefix
)
206 self
.mounts
[2].mount(mount_path
=mount_path
)
208 # The kernel client doesn't have the quota-based df behaviour,
209 # or quotas at all, so only exercise the client behaviour when
211 if isinstance(self
.mounts
[2], FuseMount
):
212 # df should see volume size, same as the quota set on volume's dir
213 self
.assertEqual(self
.mounts
[2].df()['total'],
214 volume_size
* 1024 * 1024)
216 self
.mount_a
.getfattr(
217 os
.path
.join(volume_prefix
.strip("/"), group_id
, volume_id
),
218 "ceph.quota.max_bytes"),
219 "%s" % (volume_size
* 1024 * 1024))
221 # df granularity is 4MB block so have to write at least that much
223 self
.mounts
[2].write_n_mb("data.bin", data_bin_mb
)
225 # Write something outside volume to check this space usage is
226 # not reported in the volume's DF.
228 self
.mount_a
.write_n_mb("other.bin", other_bin_mb
)
230 # global: df should see all the writes (data + other). This is a >
231 # rather than a == because the global spaced used includes all pools
233 used
= self
.mount_a
.df()['used']
234 return used
>= (other_bin_mb
* 1024 * 1024)
236 self
.wait_until_true(check_df
, timeout
=30)
238 # Hack: do a metadata IO to kick rstats
239 self
.mounts
[2].run_shell(["touch", "foo"])
241 # volume: df should see the data_bin_mb consumed from quota, same
242 # as the rbytes for the volume's dir
243 self
.wait_until_equal(
244 lambda: self
.mounts
[2].df()['used'],
245 data_bin_mb
* 1024 * 1024, timeout
=60)
246 self
.wait_until_equal(
247 lambda: self
.mount_a
.getfattr(
248 os
.path
.join(volume_prefix
.strip("/"), group_id
, volume_id
),
250 "%s" % (data_bin_mb
* 1024 * 1024), timeout
=60)
252 # sync so that file data are persist to rados
253 self
.mounts
[2].run_shell(["sync"])
255 # Our data should stay in particular rados namespace
256 pool_name
= self
.mount_a
.getfattr(os
.path
.join("myprefix", group_id
, volume_id
), "ceph.dir.layout.pool")
257 namespace
= "{0}{1}".format(namespace_prefix
, volume_id
)
258 ns_in_attr
= self
.mount_a
.getfattr(os
.path
.join("myprefix", group_id
, volume_id
), "ceph.dir.layout.pool_namespace")
259 self
.assertEqual(namespace
, ns_in_attr
)
261 objects_in_ns
= set(self
.fs
.rados(["ls"], pool
=pool_name
, namespace
=namespace
).split("\n"))
262 self
.assertNotEqual(objects_in_ns
, set())
264 # De-authorize the guest
265 self
._volume
_client
_python
(self
.mount_b
, dedent("""
266 vp = VolumePath("{group_id}", "{volume_id}")
267 vc.deauthorize(vp, "{guest_entity}")
268 vc.evict("{guest_entity}")
272 guest_entity
=guest_entity
273 )), volume_prefix
, namespace_prefix
)
275 # Once deauthorized, the client should be unable to do any more metadata ops
276 # The way that the client currently behaves here is to block (it acts like
277 # it has lost network, because there is nothing to tell it that is messages
278 # are being dropped because it's identity is gone)
279 background
= self
.mounts
[2].write_n_mb("rogue.bin", 1, wait
=False)
282 except CommandFailedError
:
283 # command failed with EBLACKLISTED?
284 if "transport endpoint shutdown" in background
.stderr
.getvalue():
289 # After deauthorisation, the client ID should be gone (this was the only
290 # volume it was authorised for)
291 self
.assertNotIn("client.{0}".format(guest_entity
), [e
['entity'] for e
in self
.auth_list()])
293 # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
294 self
.mounts
[2].umount_wait()
296 self
._volume
_client
_python
(self
.mount_b
, dedent("""
297 vp = VolumePath("{group_id}", "{volume_id}")
303 )), volume_prefix
, namespace_prefix
)
305 def test_idempotency(self
):
307 That the volumeclient interface works when calling everything twice
309 self
.mount_b
.umount_wait()
310 self
._configure
_vc
_auth
(self
.mount_b
, "manila")
312 guest_entity
= "guest"
315 self
._volume
_client
_python
(self
.mount_b
, dedent("""
316 vp = VolumePath("{group_id}", "{volume_id}")
317 vc.create_volume(vp, 10)
318 vc.create_volume(vp, 10)
319 vc.authorize(vp, "{guest_entity}")
320 vc.authorize(vp, "{guest_entity}")
321 vc.deauthorize(vp, "{guest_entity}")
322 vc.deauthorize(vp, "{guest_entity}")
328 vc.create_volume(vp, 10, data_isolated=True)
329 vc.create_volume(vp, 10, data_isolated=True)
330 vc.authorize(vp, "{guest_entity}")
331 vc.authorize(vp, "{guest_entity}")
332 vc.deauthorize(vp, "{guest_entity}")
333 vc.deauthorize(vp, "{guest_entity}")
334 vc.evict("{guest_entity}")
335 vc.evict("{guest_entity}")
336 vc.delete_volume(vp, data_isolated=True)
337 vc.delete_volume(vp, data_isolated=True)
338 vc.purge_volume(vp, data_isolated=True)
339 vc.purge_volume(vp, data_isolated=True)
341 vc.create_volume(vp, 10, namespace_isolated=False)
342 vc.create_volume(vp, 10, namespace_isolated=False)
343 vc.authorize(vp, "{guest_entity}")
344 vc.authorize(vp, "{guest_entity}")
345 vc.deauthorize(vp, "{guest_entity}")
346 vc.deauthorize(vp, "{guest_entity}")
347 vc.evict("{guest_entity}")
348 vc.evict("{guest_entity}")
356 guest_entity
=guest_entity
359 def test_data_isolated(self
):
361 That data isolated shares get their own pool
365 self
.mount_b
.umount_wait()
366 self
._configure
_vc
_auth
(self
.mount_b
, "manila")
368 pools_a
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
372 self
._volume
_client
_python
(self
.mount_b
, dedent("""
373 vp = VolumePath("{group_id}", "{volume_id}")
374 vc.create_volume(vp, data_isolated=True)
380 pools_b
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
382 # Should have created one new pool
383 new_pools
= set(p
['pool_name'] for p
in pools_b
) - set([p
['pool_name'] for p
in pools_a
])
384 self
.assertEqual(len(new_pools
), 1)
386 def test_15303(self
):
388 Reproducer for #15303 "Client holds incorrect complete flag on dir
389 after losing caps" (http://tracker.ceph.com/issues/15303)
391 for m
in self
.mounts
:
394 # Create a dir on mount A
395 self
.mount_a
.mount_wait()
396 self
.mount_a
.run_shell(["mkdir", "parent1"])
397 self
.mount_a
.run_shell(["mkdir", "parent2"])
398 self
.mount_a
.run_shell(["mkdir", "parent1/mydir"])
400 # Put some files in it from mount B
401 self
.mount_b
.mount_wait()
402 self
.mount_b
.run_shell(["touch", "parent1/mydir/afile"])
403 self
.mount_b
.umount_wait()
405 # List the dir's contents on mount A
406 self
.assertListEqual(self
.mount_a
.ls("parent1/mydir"),
409 def test_evict_client(self
):
411 That a volume client can be evicted based on its auth ID and the volume
415 if not isinstance(self
.mount_a
, FuseMount
):
416 self
.skipTest("Requires FUSE client to inject client metadata")
418 # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
419 # and mounts[3] would be used as guests to mount the volumes/shares.
421 for i
in range(1, 4):
422 self
.mounts
[i
].umount_wait()
424 volumeclient_mount
= self
.mounts
[1]
425 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
426 guest_mounts
= (self
.mounts
[2], self
.mounts
[3])
428 guest_entity
= "guest"
433 # Create two volumes. Authorize 'guest' auth ID to mount the two
434 # volumes. Mount the two volumes. Write data to the volumes.
437 volume_ids
.append("volid_{0}".format(str(i
)))
439 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
440 vp = VolumePath("{group_id}", "{volume_id}")
441 create_result = vc.create_volume(vp, 10 * 1024 * 1024)
442 print(create_result['mount_path'])
445 volume_id
=volume_ids
[i
]
448 # Authorize 'guest' auth ID to mount the volume.
449 self
._configure
_guest
_auth
(volumeclient_mount
, guest_mounts
[i
],
450 guest_entity
, mount_paths
[i
])
453 guest_mounts
[i
].mountpoint_dir_name
= 'mnt.{id}.{suffix}'.format(
454 id=guest_entity
, suffix
=str(i
))
455 guest_mounts
[i
].mount(mount_path
=mount_paths
[i
])
456 guest_mounts
[i
].write_n_mb("data.bin", 1)
459 # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
461 self
._volume
_client
_python
(self
.mount_b
, dedent("""
462 vp = VolumePath("{group_id}", "{volume_id}")
463 vc.deauthorize(vp, "{guest_entity}")
464 vc.evict("{guest_entity}", volume_path=vp)
467 volume_id
=volume_ids
[0],
468 guest_entity
=guest_entity
471 # Evicted guest client, guest_mounts[0], should not be able to do
472 # anymore metadata ops. It should start failing all operations
473 # when it sees that its own address is in the blacklist.
475 guest_mounts
[0].write_n_mb("rogue.bin", 1)
476 except CommandFailedError
:
479 raise RuntimeError("post-eviction write should have failed!")
481 # The blacklisted guest client should now be unmountable
482 guest_mounts
[0].umount_wait()
484 # Guest client, guest_mounts[1], using the same auth ID 'guest', but
485 # has mounted the other volume, should be able to use its volume
487 guest_mounts
[1].write_n_mb("data.bin.1", 1)
491 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
492 vp = VolumePath("{group_id}", "{volume_id}")
493 vc.deauthorize(vp, "{guest_entity}")
498 volume_id
=volume_ids
[i
],
499 guest_entity
=guest_entity
503 def test_purge(self
):
505 Reproducer for #15266, exception trying to purge volumes that
506 contain non-ascii filenames.
508 Additionally test any other purge corner cases here.
510 # I'm going to leave mount_b unmounted and just use it as a handle for
511 # driving volumeclient. It's a little hacky but we don't have a more
512 # general concept for librados/libcephfs clients as opposed to full
513 # blown mounting clients.
514 self
.mount_b
.umount_wait()
515 self
._configure
_vc
_auth
(self
.mount_b
, "manila")
518 # Use a unicode volume ID (like Manila), to reproduce #15266
522 mount_path
= self
._volume
_client
_python
(self
.mount_b
, dedent("""
523 vp = VolumePath("{group_id}", u"{volume_id}")
524 create_result = vc.create_volume(vp, 10)
525 print(create_result['mount_path'])
532 mount_path
= mount_path
[1:]
534 # A file with non-ascii characters
535 self
.mount_a
.run_shell(["touch", os
.path
.join(mount_path
, u
"b\u00F6b")])
537 # A file with no permissions to do anything
538 self
.mount_a
.run_shell(["touch", os
.path
.join(mount_path
, "noperms")])
539 self
.mount_a
.run_shell(["chmod", "0000", os
.path
.join(mount_path
, "noperms")])
541 self
._volume
_client
_python
(self
.mount_b
, dedent("""
542 vp = VolumePath("{group_id}", u"{volume_id}")
550 # Check it's really gone
551 self
.assertEqual(self
.mount_a
.ls("volumes/_deleting"), [])
552 self
.assertEqual(self
.mount_a
.ls("volumes/"), ["_deleting", group_id
])
554 def test_readonly_authorization(self
):
556 That guest clients can be restricted to read-only mounts of volumes.
559 volumeclient_mount
= self
.mounts
[1]
560 guest_mount
= self
.mounts
[2]
561 volumeclient_mount
.umount_wait()
562 guest_mount
.umount_wait()
564 # Configure volumeclient_mount as the handle for driving volumeclient.
565 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
567 guest_entity
= "guest"
572 mount_path
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
573 vp = VolumePath("{group_id}", "{volume_id}")
574 create_result = vc.create_volume(vp, 1024*1024*10)
575 print(create_result['mount_path'])
581 # Authorize and configure credentials for the guest to mount the
582 # the volume with read-write access.
583 self
._configure
_guest
_auth
(volumeclient_mount
, guest_mount
, guest_entity
,
584 mount_path
, readonly
=False)
586 # Mount the volume, and write to it.
587 guest_mount
.mount(mount_path
=mount_path
)
588 guest_mount
.write_n_mb("data.bin", 1)
590 # Change the guest auth ID's authorization to read-only mount access.
591 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
592 vp = VolumePath("{group_id}", "{volume_id}")
593 vc.deauthorize(vp, "{guest_entity}")
597 guest_entity
=guest_entity
599 self
._configure
_guest
_auth
(volumeclient_mount
, guest_mount
, guest_entity
,
600 mount_path
, readonly
=True)
602 # The effect of the change in access level to read-only is not
603 # immediate. The guest sees the change only after a remount of
605 guest_mount
.umount_wait()
606 guest_mount
.mount(mount_path
=mount_path
)
608 # Read existing content of the volume.
609 self
.assertListEqual(guest_mount
.ls(guest_mount
.mountpoint
), ["data.bin"])
610 # Cannot write into read-only volume.
612 guest_mount
.write_n_mb("rogue.bin", 1)
613 except CommandFailedError
:
616 def test_get_authorized_ids(self
):
618 That for a volume, the authorized IDs and their access levels
619 can be obtained using CephFSVolumeClient's get_authorized_ids().
621 volumeclient_mount
= self
.mounts
[1]
622 volumeclient_mount
.umount_wait()
624 # Configure volumeclient_mount as the handle for driving volumeclient.
625 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
629 guest_entity_1
= "guest1"
630 guest_entity_2
= "guest2"
632 log
.info("print(group ID: {0})".format(group_id
))
635 auths
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
636 vp = VolumePath("{group_id}", "{volume_id}")
637 vc.create_volume(vp, 1024*1024*10)
638 auths = vc.get_authorized_ids(vp)
644 # Check the list of authorized IDs for the volume.
645 self
.assertEqual('None', auths
)
647 # Allow two auth IDs access to the volume.
648 auths
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
649 vp = VolumePath("{group_id}", "{volume_id}")
650 vc.authorize(vp, "{guest_entity_1}", readonly=False)
651 vc.authorize(vp, "{guest_entity_2}", readonly=True)
652 auths = vc.get_authorized_ids(vp)
657 guest_entity_1
=guest_entity_1
,
658 guest_entity_2
=guest_entity_2
,
660 # Check the list of authorized IDs and their access levels.
661 if self
.py_version
== 'python3':
662 expected_result
= [('guest1', 'rw'), ('guest2', 'r')]
663 self
.assertCountEqual(str(expected_result
), auths
)
665 expected_result
= [(u
'guest1', u
'rw'), (u
'guest2', u
'r')]
666 self
.assertItemsEqual(str(expected_result
), auths
)
668 # Disallow both the auth IDs' access to the volume.
669 auths
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
670 vp = VolumePath("{group_id}", "{volume_id}")
671 vc.deauthorize(vp, "{guest_entity_1}")
672 vc.deauthorize(vp, "{guest_entity_2}")
673 auths = vc.get_authorized_ids(vp)
678 guest_entity_1
=guest_entity_1
,
679 guest_entity_2
=guest_entity_2
,
681 # Check the list of authorized IDs for the volume.
682 self
.assertEqual('None', auths
)
684 def test_multitenant_volumes(self
):
686 That volume access can be restricted to a tenant.
688 That metadata used to enforce tenant isolation of
689 volumes is stored as a two-way mapping between auth
690 IDs and volumes that they're authorized to access.
692 volumeclient_mount
= self
.mounts
[1]
693 volumeclient_mount
.umount_wait()
695 # Configure volumeclient_mount as the handle for driving volumeclient.
696 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
699 volume_id
= "volumeid"
701 # Guest clients belonging to different tenants, but using the same
706 "tenant_id": "tenant1",
710 "tenant_id": "tenant2",
714 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
715 vp = VolumePath("{group_id}", "{volume_id}")
716 vc.create_volume(vp, 1024*1024*10)
722 # Check that volume metadata file is created on volume creation.
723 vol_metadata_filename
= "_{0}:{1}.meta".format(group_id
, volume_id
)
724 self
.assertIn(vol_metadata_filename
, self
.mounts
[0].ls("volumes"))
726 # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
727 # 'tenant1', with 'rw' access to the volume.
728 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
729 vp = VolumePath("{group_id}", "{volume_id}")
730 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
734 auth_id
=guestclient_1
["auth_id"],
735 tenant_id
=guestclient_1
["tenant_id"]
738 # Check that auth metadata file for auth ID 'guest', is
739 # created on authorizing 'guest' access to the volume.
740 auth_metadata_filename
= "${0}.meta".format(guestclient_1
["auth_id"])
741 self
.assertIn(auth_metadata_filename
, self
.mounts
[0].ls("volumes"))
743 # Verify that the auth metadata file stores the tenant ID that the
744 # auth ID belongs to, the auth ID's authorized access levels
745 # for different volumes, versioning details, etc.
746 expected_auth_metadata
= {
750 "tenant_id": "tenant1",
752 "groupid/volumeid": {
759 auth_metadata
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
761 vp = VolumePath("{group_id}", "{volume_id}")
762 auth_metadata = vc._auth_metadata_get("{auth_id}")
763 print(json.dumps(auth_metadata))
767 auth_id
=guestclient_1
["auth_id"],
769 auth_metadata
= json
.loads(auth_metadata
)
771 self
.assertGreaterEqual(auth_metadata
["version"], expected_auth_metadata
["version"])
772 del expected_auth_metadata
["version"]
773 del auth_metadata
["version"]
774 self
.assertEqual(expected_auth_metadata
, auth_metadata
)
776 # Verify that the volume metadata file stores info about auth IDs
777 # and their access levels to the volume, versioning details, etc.
778 expected_vol_metadata
= {
789 vol_metadata
= self
._volume
_client
_python
(volumeclient_mount
, dedent("""
791 vp = VolumePath("{group_id}", "{volume_id}")
792 volume_metadata = vc._volume_metadata_get(vp)
793 print(json.dumps(volume_metadata))
798 vol_metadata
= json
.loads(vol_metadata
)
800 self
.assertGreaterEqual(vol_metadata
["version"], expected_vol_metadata
["version"])
801 del expected_vol_metadata
["version"]
802 del vol_metadata
["version"]
803 self
.assertEqual(expected_vol_metadata
, vol_metadata
)
805 # Cannot authorize 'guestclient_2' to access the volume.
806 # It uses auth ID 'guest', which has already been used by a
807 # 'guestclient_1' belonging to an another tenant for accessing
809 with self
.assertRaises(CommandFailedError
):
810 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
811 vp = VolumePath("{group_id}", "{volume_id}")
812 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
816 auth_id
=guestclient_2
["auth_id"],
817 tenant_id
=guestclient_2
["tenant_id"]
820 # Check that auth metadata file is cleaned up on removing
821 # auth ID's only access to a volume.
822 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
823 vp = VolumePath("{group_id}", "{volume_id}")
824 vc.deauthorize(vp, "{guest_entity}")
828 guest_entity
=guestclient_1
["auth_id"]
831 self
.assertNotIn(auth_metadata_filename
, self
.mounts
[0].ls("volumes"))
833 # Check that volume metadata file is cleaned up on volume deletion.
834 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
835 vp = VolumePath("{group_id}", "{volume_id}")
841 self
.assertNotIn(vol_metadata_filename
, self
.mounts
[0].ls("volumes"))
843 def test_authorize_auth_id_not_created_by_ceph_volume_client(self
):
845 If the auth_id already exists and is not created by
846 ceph_volume_client, it's not allowed to authorize
847 the auth-id by default.
849 volumeclient_mount
= self
.mounts
[1]
850 volumeclient_mount
.umount_wait()
852 # Configure volumeclient_mount as the handle for driving volumeclient.
853 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
856 volume_id
= "volumeid"
859 self
.fs
.mon_manager
.raw_cluster_cmd(
860 "auth", "get-or-create", "client.guest1",
869 "tenant_id": "tenant1",
873 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
874 vp = VolumePath("{group_id}", "{volume_id}")
875 vc.create_volume(vp, 1024*1024*10)
881 # Cannot authorize 'guestclient_1' to access the volume.
882 # It uses auth ID 'guest1', which already exists and not
883 # created by ceph_volume_client
884 with self
.assertRaises(CommandFailedError
):
885 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
886 vp = VolumePath("{group_id}", "{volume_id}")
887 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
891 auth_id
=guestclient_1
["auth_id"],
892 tenant_id
=guestclient_1
["tenant_id"]
896 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
897 vp = VolumePath("{group_id}", "{volume_id}")
904 def test_authorize_allow_existing_id_option(self
):
906 If the auth_id already exists and is not created by
907 ceph_volume_client, it's not allowed to authorize
908 the auth-id by default but is allowed with option
911 volumeclient_mount
= self
.mounts
[1]
912 volumeclient_mount
.umount_wait()
914 # Configure volumeclient_mount as the handle for driving volumeclient.
915 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
918 volume_id
= "volumeid"
921 self
.fs
.mon_manager
.raw_cluster_cmd(
922 "auth", "get-or-create", "client.guest1",
931 "tenant_id": "tenant1",
935 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
936 vp = VolumePath("{group_id}", "{volume_id}")
937 vc.create_volume(vp, 1024*1024*10)
943 # Cannot authorize 'guestclient_1' to access the volume
944 # by default, which already exists and not created by
945 # ceph_volume_client but is allowed with option 'allow_existing_id'.
946 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
947 vp = VolumePath("{group_id}", "{volume_id}")
948 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}",
949 allow_existing_id="{allow_existing_id}")
953 auth_id
=guestclient_1
["auth_id"],
954 tenant_id
=guestclient_1
["tenant_id"],
955 allow_existing_id
=True
959 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
960 vp = VolumePath("{group_id}", "{volume_id}")
967 def test_deauthorize_auth_id_after_out_of_band_update(self
):
969 If the auth_id authorized by ceph_volume_client is updated
970 out of band, the auth_id should not be deleted after a
971 deauthorize. It should only remove caps associated it.
973 volumeclient_mount
= self
.mounts
[1]
974 volumeclient_mount
.umount_wait()
976 # Configure volumeclient_mount as the handle for driving volumeclient.
977 self
._configure
_vc
_auth
(volumeclient_mount
, "manila")
980 volume_id
= "volumeid"
986 "tenant_id": "tenant1",
990 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
991 vp = VolumePath("{group_id}", "{volume_id}")
992 vc.create_volume(vp, 1024*1024*10)
998 # Authorize 'guestclient_1' to access the volume.
999 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
1000 vp = VolumePath("{group_id}", "{volume_id}")
1001 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
1004 volume_id
=volume_id
,
1005 auth_id
=guestclient_1
["auth_id"],
1006 tenant_id
=guestclient_1
["tenant_id"]
1009 # Update caps for guestclient_1 out of band
1010 out
= self
.fs
.mon_manager
.raw_cluster_cmd(
1011 "auth", "caps", "client.guest1",
1012 "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid",
1013 "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid",
1018 # Deauthorize guestclient_1
1019 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
1020 vp = VolumePath("{group_id}", "{volume_id}")
1021 vc.deauthorize(vp, "{guest_entity}")
1024 volume_id
=volume_id
,
1025 guest_entity
=guestclient_1
["auth_id"]
1028 # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
1029 # guestclient_1. The mgr and mds caps should be present which was updated out of band.
1030 out
= json
.loads(self
.fs
.mon_manager
.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
1032 self
.assertEqual("client.guest1", out
[0]["entity"])
1033 self
.assertEqual("allow rw path=/volumes/groupid", out
[0]["caps"]["mds"])
1034 self
.assertEqual("allow *", out
[0]["caps"]["mgr"])
1035 self
.assertNotIn("osd", out
[0]["caps"])
1038 self
._volume
_client
_python
(volumeclient_mount
, dedent("""
1039 vp = VolumePath("{group_id}", "{volume_id}")
1040 vc.delete_volume(vp)
1043 volume_id
=volume_id
,
def test_recover_metadata(self):
    """
    That volume client can recover from partial auth updates using
    metadata files, which store auth info and its update status info.
    """
    volumeclient_mount = self.mounts[1]
    volumeclient_mount.umount_wait()

    # Configure volumeclient_mount as the handle for driving volumeclient.
    self._configure_vc_auth(volumeclient_mount, "manila")

    group_id = "groupid"
    volume_id = "volumeid"

    guestclient = {
        # NOTE(review): the "auth_id" entry was lost in extraction; "guest"
        # is inferred from the later check for the "$guest.meta" file --
        # confirm against the upstream file.
        "auth_id": "guest",
        "tenant_id": "tenant",
    }

    # Create a volume.
    self._volume_client_python(volumeclient_mount, dedent("""
        vp = VolumePath("{group_id}", "{volume_id}")
        vc.create_volume(vp, 1024*1024*10)
    """.format(
        group_id=group_id,
        volume_id=volume_id,
    )))

    # Authorize 'guestclient' access to the volume.
    self._volume_client_python(volumeclient_mount, dedent("""
        vp = VolumePath("{group_id}", "{volume_id}")
        vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
    """.format(
        group_id=group_id,
        volume_id=volume_id,
        auth_id=guestclient["auth_id"],
        tenant_id=guestclient["tenant_id"]
    )))

    # Check that auth metadata file for auth ID 'guest' is created.
    auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
    self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

    # Induce partial auth update state by modifying the auth metadata file,
    # and then run recovery procedure.
    self._volume_client_python(volumeclient_mount, dedent("""
        vp = VolumePath("{group_id}", "{volume_id}")
        auth_metadata = vc._auth_metadata_get("{auth_id}")
        auth_metadata['dirty'] = True
        vc._auth_metadata_set("{auth_id}", auth_metadata)
        vc.recover()
    """.format(
        group_id=group_id,
        volume_id=volume_id,
        auth_id=guestclient["auth_id"],
    )))
def test_put_object(self):
    """
    That put_object() stores an object in the named RADOS pool and the
    stored bytes can be read back with a raw `rados get`.
    """
    vc_mount = self.mounts[1]
    vc_mount.umount_wait()
    self._configure_vc_auth(vc_mount, "manila")

    obj_data = 'test data'
    obj_name = 'test_vc_obj_1'
    # Use the filesystem's first data pool as the target pool.
    pool_name = self.fs.get_data_pool_names()[0]

    self._volume_client_python(vc_mount, dedent("""
        vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
    """.format(
        pool_name = pool_name,
        obj_name = obj_name,
        obj_data = obj_data
    )))

    # Read the object back with plain rados and compare to what was written.
    read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
    self.assertEqual(obj_data, read_data)
def test_get_object(self):
    """
    That get_object() returns, as bytes, the data previously written to
    the pool with a raw `rados put`.
    """
    vc_mount = self.mounts[1]
    vc_mount.umount_wait()
    self._configure_vc_auth(vc_mount, "manila")

    obj_data = 'test_data'
    obj_name = 'test_vc_ob_2'
    pool_name = self.fs.get_data_pool_names()[0]

    # Seed the pool with the object using plain rados.
    self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

    # The in-script assert compares against a bytes literal because
    # get_object() returns bytes.
    self._volume_client_python(vc_mount, dedent("""
        data_read = vc.get_object("{pool_name}", "{obj_name}")
        assert data_read == b"{obj_data}"
    """.format(
        pool_name = pool_name,
        obj_name = obj_name,
        obj_data = obj_data
    )))
def test_put_object_versioned(self):
    """
    That put_object_versioned() updates an object when given the current
    version, and that the object's version advances by one afterwards.
    """
    vc_mount = self.mounts[1]
    vc_mount.umount_wait()
    self._configure_vc_auth(vc_mount, "manila")

    obj_data = 'test_data'
    obj_name = 'test_vc_obj'
    pool_name = self.fs.get_data_pool_names()[0]
    self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

    # The remote script branches on the interpreter version because
    # get_object_and_version() returns str on py2 and bytes on py3.
    self._volume_client_python(vc_mount, dedent("""
        data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}")

        if sys_version_info.major < 3:
            data = data + 'modification1'
        elif sys_version_info.major > 3:
            data = str.encode(data.decode() + 'modification1')

        vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before)
        data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}")
        assert version_after == version_before + 1
    """).format(pool_name=pool_name, obj_name=obj_name))
def test_version_check_for_put_object_versioned(self):
    """
    That put_object_versioned() crosschecks the version of the given
    object. Being a negative test, an exception is expected: the object
    is updated behind the caller's back so the stale version must be
    rejected with a rados OSError.
    """
    vc_mount = self.mounts[1]
    vc_mount.umount_wait()
    self._configure_vc_auth(vc_mount, "manila")

    obj_data = 'test_data'
    obj_name = 'test_vc_ob_2'
    pool_name = self.fs.get_data_pool_names()[0]
    self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

    # Test if put_object_versioned() crosschecks the version of the
    # given object. Being a negative test, an exception is expected.
    # 'rados_OSError' is the alias the remote script imports for
    # rados.OSError (see _volume_client_python's preamble).
    expected_exception = 'rados_OSError'
    output = self._volume_client_python(vc_mount, dedent("""
        data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")

        if sys_version_info.major < 3:
            data = data + 'm1'
        elif sys_version_info.major > 3:
            data = str.encode(data.decode('utf-8') + 'm1')

        vc.put_object("{pool_name}", "{obj_name}", data)

        if sys_version_info.major < 3:
            data = data + 'm2'
        elif sys_version_info.major > 3:
            data = str.encode(data.decode('utf-8') + 'm2')

        try:
            vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
        except {expected_exception}:
            print('{expected_exception} raised')
    """).format(pool_name=pool_name, obj_name=obj_name,
                expected_exception=expected_exception))
    self.assertEqual(expected_exception + ' raised', output)
def test_delete_object(self):
    """
    That delete_object() removes an object from the pool, and that
    deleting an already-absent object raises no error (idempotency).
    """
    vc_mount = self.mounts[1]
    vc_mount.umount_wait()
    self._configure_vc_auth(vc_mount, "manila")

    obj_data = 'test data'
    obj_name = 'test_vc_obj_3'
    pool_name = self.fs.get_data_pool_names()[0]
    self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

    self._volume_client_python(vc_mount, dedent("""
        data_read = vc.delete_object("{pool_name}", "{obj_name}")
    """.format(
        pool_name = pool_name,
        obj_name = obj_name,
    )))

    # A raw rados stat must now fail: the object is gone.
    with self.assertRaises(CommandFailedError):
        self.fs.rados(['stat', obj_name], pool=pool_name)

    # Check idempotency -- no error raised trying to delete non-existent
    # object.
    self._volume_client_python(vc_mount, dedent("""
        data_read = vc.delete_object("{pool_name}", "{obj_name}")
    """.format(
        pool_name = pool_name,
        obj_name = obj_name,
    )))
def test_21501(self):
    """
    Reproducer for #21501 "ceph_volume_client: sets invalid caps for
    existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
    """
    vc_mount = self.mounts[1]
    vc_mount.umount_wait()

    # Configure vc_mount as the handle for driving volumeclient
    self._configure_vc_auth(vc_mount, "manila")

    # Create a volume.
    # NOTE(review): the id-definition lines were lost in extraction; these
    # values are reconstructed -- confirm against the upstream file.
    group_id = "grpid"
    volume_id = "volid"
    mount_path = self._volume_client_python(vc_mount, dedent("""
        vp = VolumePath("{group_id}", "{volume_id}")
        create_result = vc.create_volume(vp, 1024*1024*10)
        print(create_result['mount_path'])
    """.format(
        group_id=group_id,
        volume_id=volume_id
    )))

    # Create an auth ID with no caps
    # NOTE(review): guest_id definition lost in extraction; reconstructed.
    guest_id = '21501'
    self.fs.mon_manager.raw_cluster_cmd_result(
        'auth', 'get-or-create', 'client.{0}'.format(guest_id))

    guest_mount = self.mounts[2]
    guest_mount.umount_wait()

    # Set auth caps for the auth ID using the volumeclient; the pre-existing
    # empty-caps ID must be tolerated explicitly.
    self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path,
                               allow_existing_id=True)

    # Mount the volume in the guest using the auth ID to assert that the
    # auth caps are valid
    guest_mount.mount(mount_path=mount_path)
1273 def test_volume_without_namespace_isolation(self
):
1275 That volume client can create volumes that do not have separate RADOS
1278 vc_mount
= self
.mounts
[1]
1279 vc_mount
.umount_wait()
1281 # Configure vc_mount as the handle for driving volumeclient
1282 self
._configure
_vc
_auth
(vc_mount
, "manila")
1285 volume_prefix
= "/myprefix"
1288 self
._volume
_client
_python
(vc_mount
, dedent("""
1289 vp = VolumePath("{group_id}", "{volume_id}")
1290 create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
1291 print(create_result['mount_path'])
1297 # The CephFS volume should be created
1298 self
.mounts
[0].stat(os
.path
.join("myprefix", group_id
, volume_id
))
1299 vol_namespace
= self
.mounts
[0].getfattr(
1300 os
.path
.join("myprefix", group_id
, volume_id
),
1301 "ceph.dir.layout.pool_namespace")
1302 assert not vol_namespace
1304 self
._volume
_client
_python
(vc_mount
, dedent("""
1305 vp = VolumePath("{group_id}", "{volume_id}")
1306 vc.delete_volume(vp)
1310 volume_id
=volume_id
,