import json
import logging
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
from teuthology.misc import sudo_write_file

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4
    default_py_version = 'python3'
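    # The interpreter used for the remote volume-client scripts can be
    # overridden per teuthology job via config ('overrides' -> 'python3');
    # see setUp() below.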

    def setUp(self):
        CephFSTestCase.setUp(self)
        self.py_version = self.ctx.config.get('overrides', {}).\
                          get('python3', TestVolumeClient.default_py_version)
        log.info("using python version: {python_version}".format(
            python_version=self.py_version
        ))

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
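        """
        Run a script remotely against ceph_volume_client on the given client.

        The payload is wrapped in a small harness that connects a
        CephFSVolumeClient instance named 'vc' before the script runs and
        disconnects it afterwards.
        """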
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
from sys import version_info as sys_version_info
from rados import OSError as rados_OSError
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path,
                   vol_prefix=vol_prefix, ns_prefix=ns_prefix),
        self.py_version)

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None, allow_existing_id=False):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        :param allow_existing_id: defaults to False. If set to 'True', an
                                  auth ID that already exists and was not
                                  created by ceph_volume_client may be
                                  authorized.
        """

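        # mount_path has the form "/<volume_prefix>/<group_id>/<volume_id>";
        # split it back into its components from the right.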
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}",
                                       allow_existing_id="{allow_existing_id}")
            print(auth_result['auth_key'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id,
            allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
        key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        sudo_write_file(guest_mount.client_remote,
                        guest_mount.get_keyring_path(), keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"
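        # These are expected to match the defaults CephFSVolumeClient uses
        # when no prefixes are passed in (see ceph_volume_client).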

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)


    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount
        # the volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                    self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.quota.max_bytes"),
                    "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other).  This is a >=
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                    lambda: self.mounts[2].df()['used'],
                    data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                    lambda: self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.dir.rbytes"),
                    "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

            # sync so that file data is persisted to RADOS
            self.mounts[2].run_shell(["sync"])

            # Our data should stay in the particular RADOS namespace
            pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
            namespace = "{0}{1}".format(namespace_prefix, volume_id)
            ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
            self.assertEqual(namespace, ns_in_attr)

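            # Listing objects in the volume's namespace should show the file
            # data written above, confirming it landed in that namespace.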
            objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
            self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops
        # The way that the client currently behaves here is to block (it acts like
        # it has lost network, because there is nothing to tell it that its messages
        # are being dropped because its identity is gone)
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLACKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount_wait()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount_wait()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] is used as the handle for driving VolumeClient. mounts[2]
        # and mounts[3] are used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print(create_result['mount_path'])
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
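            # Give each guest mount a distinct mountpoint dir so the two
            # guest mounts on the same node don't collide.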
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)


        # Evict the guest client, guest_mounts[0], which is using auth ID
        # 'guest' and has mounted one volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops.  It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # The other guest client, guest_mounts[1], uses the same auth ID
        # 'guest' but has mounted the other volume; it should be able to use
        # its volume unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))


    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount
        # the volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        try:
            guest_mount.write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
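        # The remote script prints the return value of get_authorized_ids(),
        # so an empty result comes back as the string 'None'.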
        self.assertEqual('None', auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        if self.py_version == 'python3':
            expected_result = [('guest1', 'rw'), ('guest2', 'r')]
            self.assertCountEqual(str(expected_result), auths)
        else:
            expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
            self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        self.assertEqual('None', auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created
        # on authorizing 'guest' access to the volume.
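        # ceph_volume_client names auth metadata files '$<auth_id>.meta' and
        # keeps them at the top of the volume prefix directory.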
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 1,
            "dirty": False,
            "tenant_id": "tenant1",
            "volumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            "version": 2,
            "compat_version": 1,
            "auths": {
                "guest": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_authorize_auth_id_not_created_by_ceph_volume_client(self):
        """
        If the auth_id already exists and was not created by
        ceph_volume_client, authorizing the auth_id is not
        allowed by default.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Create auth_id
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Cannot authorize 'guestclient_1' to access the volume.
        # It uses auth ID 'guest1', which already exists and was not
        # created by ceph_volume_client.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_1["auth_id"],
                tenant_id=guestclient_1["tenant_id"]
            )))

        # Delete volume
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

    def test_authorize_allow_existing_id_option(self):
        """
        If the auth_id already exists and was not created by
        ceph_volume_client, authorizing the auth_id is not
        allowed by default but is allowed with the option
        allow_existing_id.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Create auth_id
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient_1' to access the volume. Its auth ID
        # 'guest1' already exists and was not created by ceph_volume_client,
        # so authorization would normally be refused, but it is allowed here
        # with the option 'allow_existing_id'.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}",
                         allow_existing_id="{allow_existing_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"],
            allow_existing_id=True
        )))

        # Delete volume
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

    def test_deauthorize_auth_id_after_out_of_band_update(self):
        """
        If the auth_id authorized by ceph_volume_client is updated
        out of band, the auth_id should not be deleted after a
        deauthorize. Only the caps associated with the volume should
        be removed.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient_1' to access the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Update caps for guestclient_1 out of band
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "caps", "client.guest1",
            "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid",
            "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid",
            "mon", "allow r",
            "mgr", "allow *"
        )

        # Deauthorize guestclient_1
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        # Validate the caps of guestclient_1 after deauthorize. The auth ID
        # should not have been deleted, and the mgr and mds caps that were
        # updated out of band should still be present.
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))

        self.assertEqual("client.guest1", out[0]["entity"])
        self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"])
        self.assertEqual("allow *", out[0]["caps"]["mgr"])
        self.assertNotIn("osd", out[0]["caps"])

        # Delete volume
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

    def test_put_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_1'
        pool_name = self.fs.get_data_pool_names()[0]

        self._volume_client_python(vc_mount, dedent("""
            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
        self.assertEqual(obj_data, read_data)

    def test_get_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.get_object("{pool_name}", "{obj_name}")
            assert data_read == b"{obj_data}"
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

    def test_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_obj'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}")

            # get_object_and_version() returns bytes on python3; append to the
            # decoded string and re-encode before writing it back.
            if sys_version_info.major < 3:
                data = data + 'modification1'
            elif sys_version_info.major >= 3:
                data = str.encode(data.decode() + 'modification1')

            vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before)
            data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}")
            assert version_after == version_before + 1
        """).format(pool_name=pool_name, obj_name=obj_name))

    def test_version_check_for_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        # Test if put_object_versioned() cross-checks the version of the
        # given object. Being a negative test, an exception is expected.
        expected_exception = 'rados_OSError'
        output = self._volume_client_python(vc_mount, dedent("""
            data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")

            if sys_version_info.major < 3:
                data = data + 'm1'
            elif sys_version_info.major >= 3:
                data = str.encode(data.decode('utf-8') + 'm1')

            # Write the object unconditionally, so the version held above
            # goes stale.
            vc.put_object("{pool_name}", "{obj_name}", data)

            if sys_version_info.major < 3:
                data = data + 'm2'
            elif sys_version_info.major >= 3:
                data = str.encode(data.decode('utf-8') + 'm2')

            # A versioned put with the stale version must now fail.
            try:
                vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
            except {expected_exception}:
                print('{expected_exception} raised')
        """).format(pool_name=pool_name, obj_name=obj_name,
                    expected_exception=expected_exception))
        self.assertEqual(expected_exception + ' raised', output)


    def test_delete_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_3'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

        with self.assertRaises(CommandFailedError):
            self.fs.rados(['stat', obj_name], pool=pool_name)

        # Check idempotency -- no error raised trying to delete non-existent
        # object
        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

    def test_21501(self):
        """
        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
        """

        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Create an auth ID with no caps
        guest_id = '21501'
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'get-or-create', 'client.{0}'.format(guest_id))

        guest_mount = self.mounts[2]
        guest_mount.umount_wait()

        # Set auth caps for the auth ID using the volumeclient
        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path,
                                   allow_existing_id=True)

        # Mount the volume in the guest using the auth ID to assert that the
        # auth caps are valid
        guest_mount.mount(mount_path=mount_path)

    def test_volume_without_namespace_isolation(self):
        """
        That volume client can create volumes that do not have separate RADOS
        namespace layouts.
        """
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        volume_prefix = "/myprefix"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix)

        # The CephFS volume should be created
        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
        vol_namespace = self.mounts[0].getfattr(
            os.path.join("myprefix", group_id, volume_id),
            "ceph.dir.layout.pool_namespace")
        assert not vol_namespace

        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix)