import json
import logging
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
from teuthology.misc import sudo_write_file

log = logging.getLogger(__name__)
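
# These tests exercise the ceph_volume_client library (the interface consumed
# by e.g. OpenStack Manila's CephFS driver) from remote clients: volume
# lifecycle, guest authorization, multi-tenancy metadata and raw object I/O.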


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4
    default_py_version = 'python3'

    def setUp(self):
        CephFSTestCase.setUp(self)
        self.py_version = self.ctx.config.get('overrides', {}).\
                          get('python3', TestVolumeClient.default_py_version)
        log.info("using python version: {python_version}".format(
            python_version=self.py_version
        ))

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
from sys import version_info as sys_version_info
from rados import OSError as rados_OSError
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path,
           vol_prefix=vol_prefix, ns_prefix=ns_prefix),
        self.py_version)
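
    # A typical call site templates a small script into the payload above,
    # e.g.:
    #
    #   self._volume_client_python(self.mount_b, dedent("""
    #       vp = VolumePath("grpid", "volid")
    #       vc.create_volume(vp, 10)
    #   """))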

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
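
    # Note: the volumeclient entity gets broad caps (mds/mon "allow *",
    # osd "allow rw") because it creates volumes and manages guest auth IDs
    # on the cluster.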

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """
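
        # mount_path is expected to look like
        # "/<volume_prefix>/<group_id>/<volume_id>"; peel it apart from the
        # right so the prefix can be passed back to the volume client.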
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print(auth_result['auth_key'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
        key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        sudo_write_file(guest_mount.client_remote,
                        guest_mount.get_keyring_path(), keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)
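
        # create_volume() returns a dict of volume details; 'mount_path' is
        # the volume root's path relative to the CephFS root, e.g.
        # "/myprefix/grpid/volid" here.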

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check that this space
            # usage is not reported in the volume's df.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other).  This is a
            # >= rather than a == because the global space used includes all
            # pools.
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # Sync so that file data is persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in the particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more
        # metadata ops.  The way that the client currently behaves here is to
        # block (it acts as if it has lost its network, because there is
        # nothing to tell it that its messages are being dropped now that its
        # identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLACKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the
        # only volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
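
        # data_isolated=True allocates a dedicated OSD pool for the volume
        # (added as an extra data pool of the filesystem), which the pool-list
        # diff below detects.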

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])
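
        # Without the #15303 fix, mount A could hang on to a stale "complete"
        # flag for mydir after losing caps, and this listing would miss the
        # file created via mount B.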

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] is used as the handle for driving the VolumeClient.
        # mounts[2] and mounts[3] are used as guests to mount the
        # volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print(create_result['mount_path'])
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which is using auth ID 'guest' and has
        # mounted the first volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))
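
        # evict() scoped with volume_path is expected to terminate only the
        # guest sessions that have that particular volume mounted; sessions
        # under the same auth ID on other volumes should survive, as checked
        # below.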

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops.  It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], using the same auth ID 'guest' but
        # with the other volume mounted, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip the leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))
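
        # delete_volume() moves the volume under "volumes/_deleting", and
        # purge_volume() then removes its contents for real, non-ascii names
        # and permission-less files included.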

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into a read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        self.assertEqual('None', auths)
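
        # _volume_client_python() returns the remote script's stdout, so the
        # assertions in this test compare against printed string forms.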

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        if self.py_version == 'python3':
            expected_result = [('guest1', 'rw'), ('guest2', 'r')]
        else:
            expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]

        self.assertCountEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        self.assertEqual('None', auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that the volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 1,
            "dirty": False,
            "tenant_id": "tenant1",
            "volumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }
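
        # (The "volumes" map is keyed by "<group_id>/<volume_id>".)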

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            "version": 2,
            "compat_version": 1,
            "auths": {
                "guest": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, to access
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That the volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce a partial auth update state by modifying the auth metadata
        # file, and then run the recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

    def test_put_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_1'
        pool_name = self.fs.get_data_pool_names()[0]

        self._volume_client_python(vc_mount, dedent("""
            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
        self.assertEqual(obj_data, read_data)

    def test_get_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.get_object("{pool_name}", "{obj_name}")
            assert data_read == b"{obj_data}"
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

    def test_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_obj'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}")

            if sys_version_info.major < 3:
                data = data + 'modification1'
            elif sys_version_info.major > 2:
                data = str.encode(data.decode() + 'modification1')

            vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before)
            data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}")
            assert version_after == version_before + 1
        """).format(pool_name=pool_name, obj_name=obj_name))
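
    # put_object_versioned() is effectively a compare-and-swap: the write only
    # goes through if the object's version still matches the one read earlier,
    # and a successful write bumps the version by one. The next test covers
    # the conflicting-update case.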

    def test_version_check_for_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        # Test that put_object_versioned() crosschecks the version of the
        # given object. Being a negative test, an exception is expected.
        expected_exception = 'rados_OSError'
        output = self._volume_client_python(vc_mount, dedent("""
            data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")

            if sys_version_info.major < 3:
                data = data + 'm1'
            elif sys_version_info.major > 2:
                data = str.encode(data.decode('utf-8') + 'm1')

            vc.put_object("{pool_name}", "{obj_name}", data)

            if sys_version_info.major < 3:
                data = data + 'm2'
            elif sys_version_info.major > 2:
                data = str.encode(data.decode('utf-8') + 'm2')

            try:
                vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
            except {expected_exception}:
                print('{expected_exception} raised')
            """).format(pool_name=pool_name, obj_name=obj_name,
                        expected_exception=expected_exception))
        self.assertEqual(expected_exception + ' raised', output)


    def test_delete_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_3'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

        with self.assertRaises(CommandFailedError):
            self.fs.rados(['stat', obj_name], pool=pool_name)

        # Check idempotency -- no error raised trying to delete a
        # non-existent object
        self._volume_client_python(vc_mount, dedent("""
            vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

    def test_21501(self):
        """
        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
        """

        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Create an auth ID with no caps
        guest_id = '21501'
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'get-or-create', 'client.{0}'.format(guest_id))

        guest_mount = self.mounts[2]
        guest_mount.umount_wait()

        # Set auth caps for the auth ID using the volumeclient
        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)

        # Mount the volume in the guest using the auth ID to assert that the
        # auth caps are valid
        guest_mount.mount(mount_path=mount_path)

    def test_volume_without_namespace_isolation(self):
        """
        That the volume client can create volumes that do not have separate
        RADOS namespace layouts.
        """
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        volume_prefix = "/myprefix"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix)

        # The CephFS volume should be created
        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
        vol_namespace = self.mounts[0].getfattr(
            os.path.join("myprefix", group_id, volume_id),
            "ceph.dir.layout.pool_namespace")
        assert not vol_namespace
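
        # (With namespace_isolated=False the volume shares the pool's default
        # RADOS namespace, so the layout namespace xattr reads back empty.)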

        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix)