ceph/qa/tasks/cephfs/test_volume_client.py
1 import json
2 import logging
3 import time
4 import os
5 from textwrap import dedent
6 from tasks.cephfs.cephfs_test_case import CephFSTestCase
7 from tasks.cephfs.fuse_mount import FuseMount
8 from teuthology.exceptions import CommandFailedError
9
10 log = logging.getLogger(__name__)
11
12
13 class TestVolumeClient(CephFSTestCase):
14 # One for looking at the global filesystem, one for being
15 # the VolumeClient, two for mounting the created shares
16 CLIENTS_REQUIRED = 4
17 py_version = 'python'
18
19 def setUp(self):
20 CephFSTestCase.setUp(self)
21 self.py_version = self.ctx.config.get('overrides', {}).get('python', 'python')
22 log.info("using python version: {python_version}".format(
23 python_version=self.py_version
24 ))
25
26 def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
27 # Can't dedent this *and* the script we pass in, because they might have different
28 # levels of indentation to begin with, so leave this string zero-indented
29 if vol_prefix:
30 vol_prefix = "\"" + vol_prefix + "\""
31 if ns_prefix:
32 ns_prefix = "\"" + ns_prefix + "\""
33 return client.run_python("""
34 from __future__ import print_function
35 from ceph_volume_client import CephFSVolumeClient, VolumePath
36 import logging
37 log = logging.getLogger("ceph_volume_client")
38 log.addHandler(logging.StreamHandler())
39 log.setLevel(logging.DEBUG)
40 vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
41 vc.connect()
42 {payload}
43 vc.disconnect()
44 """.format(payload=script, conf_path=client.config_path,
45 vol_prefix=vol_prefix, ns_prefix=ns_prefix),
46 self.py_version)
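# Example of how the tests below drive this helper (a sketch mirroring
# test_idempotency): the payload runs remotely against the connected
# CephFSVolumeClient instance that is bound to the name 'vc', e.g.
#
#   self._volume_client_python(self.mount_b, dedent("""
#       vp = VolumePath("grpid", "volid")
#       vc.create_volume(vp, 10)
#   """))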
47
48 def _sudo_write_file(self, remote, path, data):
49 """
50 Write data to a remote file as super user
51
52 :param remote: Remote site.
53 :param path: Path on the remote being written to.
54 :param data: Data to be written.
55
56 The data is streamed to the path via an inline Python helper run with sudo.
57 """
58 remote.run(
59 args=[
60 'sudo',
61 'python',
62 '-c',
63 'import shutil, sys; shutil.copyfileobj(sys.stdin, open(sys.argv[1], "wb"))',
64 path,
65 ],
66 stdin=data,
67 )
68
69 def _configure_vc_auth(self, mount, id_name):
70 """
71 Set up auth credentials for the VolumeClient user
72 """
73 out = self.fs.mon_manager.raw_cluster_cmd(
74 "auth", "get-or-create", "client.{name}".format(name=id_name),
75 "mds", "allow *",
76 "osd", "allow rw",
77 "mon", "allow *"
78 )
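# The call above is roughly equivalent to running:
#   ceph auth get-or-create client.<id_name> mds 'allow *' osd 'allow rw' mon 'allow *'
# and returns the resulting keyring text for the new/existing client.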
79 mount.client_id = id_name
80 self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
81 self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
82
83 def _configure_guest_auth(self, volumeclient_mount, guest_mount,
84 guest_entity, mount_path,
85 namespace_prefix=None, readonly=False,
86 tenant_id=None):
87 """
88 Set up auth credentials for the guest client to mount a volume.
89
90 :param volumeclient_mount: mount used as the handle for driving
91 volumeclient.
92 :param guest_mount: mount used by the guest client.
93 :param guest_entity: auth ID used by the guest client.
94 :param mount_path: path of the volume.
95 :param namespace_prefix: name prefix of the RADOS namespace, which
96 is used for the volume's layout.
97 :param readonly: defaults to False. If set to True, only read-only
98 mount access is granted to the guest.
99 :param tenant_id: (OpenStack) tenant ID of the guest client.
100 """
101
102 head, volume_id = os.path.split(mount_path)
103 head, group_id = os.path.split(head)
104 head, volume_prefix = os.path.split(head)
105 volume_prefix = "/" + volume_prefix
106
107 # Authorize the guest client's auth ID to mount the volume.
108 key = self._volume_client_python(volumeclient_mount, dedent("""
109 vp = VolumePath("{group_id}", "{volume_id}")
110 auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
111 tenant_id="{tenant_id}")
112 print(auth_result['auth_key'])
113 """.format(
114 group_id=group_id,
115 volume_id=volume_id,
116 guest_entity=guest_entity,
117 readonly=readonly,
118 tenant_id=tenant_id)), volume_prefix, namespace_prefix
119 )
120
121 # CephFSVolumeClient's authorize() does not return the secret
122 # key to a caller who isn't multi-tenant aware. Explicitly
123 # query the key for such a client.
124 if not tenant_id:
125 key = self.fs.mon_manager.raw_cluster_cmd(
126 "auth", "get-key", "client.{name}".format(name=guest_entity),
127 )
128
129 # The guest auth ID should exist.
130 existing_ids = [a['entity'] for a in self.auth_list()]
131 self.assertIn("client.{0}".format(guest_entity), existing_ids)
132
133 # Create keyring file for the guest client.
134 keyring_txt = dedent("""
135 [client.{guest_entity}]
136 key = {key}
137
138 """.format(
139 guest_entity=guest_entity,
140 key=key
141 ))
142 guest_mount.client_id = guest_entity
143 self._sudo_write_file(guest_mount.client_remote,
144 guest_mount.get_keyring_path(),
145 keyring_txt)
146
147 # Add a guest client section to the ceph config file.
148 self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
149 self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
150 self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
151 self.set_conf("client.{0}".format(guest_entity),
152 "keyring", guest_mount.get_keyring_path())
153
154 def test_default_prefix(self):
155 group_id = "grpid"
156 volume_id = "volid"
157 DEFAULT_VOL_PREFIX = "volumes"
158 DEFAULT_NS_PREFIX = "fsvolumens_"
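# These mirror CephFSVolumeClient's built-in defaults, used when no
# volume prefix / namespace prefix is passed to the constructor (the helper
# below passes None for both, so the defaults apply).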
159
160 self.mount_b.umount_wait()
161 self._configure_vc_auth(self.mount_b, "manila")
162
163 # Create a volume with the default prefix
164 self._volume_client_python(self.mount_b, dedent("""
165 vp = VolumePath("{group_id}", "{volume_id}")
166 vc.create_volume(vp, 10, data_isolated=True)
167 """.format(
168 group_id=group_id,
169 volume_id=volume_id,
170 )))
171
172 # The dir should be created
173 self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))
174
175 # The pool namespace should be set on the volume's directory
176 ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
177 namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
178 self.assertEqual(namespace, ns_in_attr)
179
180
181 def test_lifecycle(self):
182 """
183 General smoke test for create, extend, destroy
184 """
185
186 # I'm going to use mount_c later as a guest for mounting the created
187 # shares
188 self.mounts[2].umount_wait()
189
190 # I'm going to leave mount_b unmounted and just use it as a handle for
191 # driving volumeclient. It's a little hacky but we don't have a more
192 # general concept for librados/libcephfs clients as opposed to full
193 # blown mounting clients.
194 self.mount_b.umount_wait()
195 self._configure_vc_auth(self.mount_b, "manila")
196
197 guest_entity = "guest"
198 group_id = "grpid"
199 volume_id = "volid"
200
201 volume_prefix = "/myprefix"
202 namespace_prefix = "mynsprefix_"
203
204 # Create a 100MB volume
205 volume_size = 100
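# Note: create_volume() takes the size in bytes, hence the MB -> bytes
# conversion in the payload below.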
206 mount_path = self._volume_client_python(self.mount_b, dedent("""
207 vp = VolumePath("{group_id}", "{volume_id}")
208 create_result = vc.create_volume(vp, 1024*1024*{volume_size})
209 print(create_result['mount_path'])
210 """.format(
211 group_id=group_id,
212 volume_id=volume_id,
213 volume_size=volume_size
214 )), volume_prefix, namespace_prefix)
215
216 # The dir should be created
217 self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))
218
219 # Authorize and configure credentials for the guest to mount the
220 # volume.
221 self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
222 mount_path, namespace_prefix)
223 self.mounts[2].mount(mount_path=mount_path)
224
225 # The kernel client doesn't have the quota-based df behaviour,
226 # or quotas at all, so only exercise the client behaviour when
227 # running fuse.
228 if isinstance(self.mounts[2], FuseMount):
229 # df should see volume size, same as the quota set on volume's dir
230 self.assertEqual(self.mounts[2].df()['total'],
231 volume_size * 1024 * 1024)
232 self.assertEqual(
233 self.mount_a.getfattr(
234 os.path.join(volume_prefix.strip("/"), group_id, volume_id),
235 "ceph.quota.max_bytes"),
236 "%s" % (volume_size * 1024 * 1024))
237
238 # df granularity is a 4MB block, so we have to write at least that much
239 data_bin_mb = 4
240 self.mounts[2].write_n_mb("data.bin", data_bin_mb)
241
242 # Write something outside the volume to check this space usage is
243 # not reported in the volume's DF.
244 other_bin_mb = 8
245 self.mount_a.write_n_mb("other.bin", other_bin_mb)
246
247 # global: df should see all the writes (data + other). This is a >=
248 # rather than a == because the global space used includes all pools
249 def check_df():
250 used = self.mount_a.df()['used']
251 return used >= (other_bin_mb * 1024 * 1024)
252
253 self.wait_until_true(check_df, timeout=30)
254
255 # Hack: do a metadata IO to kick rstats
256 self.mounts[2].run_shell(["touch", "foo"])
257
258 # volume: df should see the data_bin_mb consumed from quota, same
259 # as the rbytes for the volume's dir
260 self.wait_until_equal(
261 lambda: self.mounts[2].df()['used'],
262 data_bin_mb * 1024 * 1024, timeout=60)
263 self.wait_until_equal(
264 lambda: self.mount_a.getfattr(
265 os.path.join(volume_prefix.strip("/"), group_id, volume_id),
266 "ceph.dir.rbytes"),
267 "%s" % (data_bin_mb * 1024 * 1024), timeout=60)
268
269 # sync so that file data is persisted to RADOS
270 self.mounts[2].run_shell(["sync"])
271
272 # Our data should stay in the volume's particular RADOS namespace
273 pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
274 namespace = "{0}{1}".format(namespace_prefix, volume_id)
275 ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
276 self.assertEqual(namespace, ns_in_attr)
277
278 objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
279 self.assertNotEqual(objects_in_ns, set())
280
281 # De-authorize the guest
282 self._volume_client_python(self.mount_b, dedent("""
283 vp = VolumePath("{group_id}", "{volume_id}")
284 vc.deauthorize(vp, "{guest_entity}")
285 vc.evict("{guest_entity}")
286 """.format(
287 group_id=group_id,
288 volume_id=volume_id,
289 guest_entity=guest_entity
290 )), volume_prefix, namespace_prefix)
291
292 # Once deauthorized, the client should be unable to do any more metadata ops.
293 # The way that the client currently behaves here is to block (it acts as if
294 # it has lost the network, because there is nothing to tell it that its
295 # messages are being dropped because its identity is gone)
296 background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
297 try:
298 background.wait()
299 except CommandFailedError:
300 # command failed with EBLACKLISTED?
301 if "transport endpoint shutdown" in background.stderr.getvalue():
302 pass
303 else:
304 raise
305
306 # After deauthorisation, the client ID should be gone (this was the only
307 # volume it was authorised for)
308 self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])
309
310 # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
311 self.mounts[2].umount_wait()
312
313 self._volume_client_python(self.mount_b, dedent("""
314 vp = VolumePath("{group_id}", "{volume_id}")
315 vc.delete_volume(vp)
316 vc.purge_volume(vp)
317 """.format(
318 group_id=group_id,
319 volume_id=volume_id,
320 )), volume_prefix, namespace_prefix)
321
322 def test_idempotency(self):
323 """
324 That the volumeclient interface works when calling everything twice
325 """
326 self.mount_b.umount_wait()
327 self._configure_vc_auth(self.mount_b, "manila")
328
329 guest_entity = "guest"
330 group_id = "grpid"
331 volume_id = "volid"
332 self._volume_client_python(self.mount_b, dedent("""
333 vp = VolumePath("{group_id}", "{volume_id}")
334 vc.create_volume(vp, 10)
335 vc.create_volume(vp, 10)
336 vc.authorize(vp, "{guest_entity}")
337 vc.authorize(vp, "{guest_entity}")
338 vc.deauthorize(vp, "{guest_entity}")
339 vc.deauthorize(vp, "{guest_entity}")
340 vc.delete_volume(vp)
341 vc.delete_volume(vp)
342 vc.purge_volume(vp)
343 vc.purge_volume(vp)
344
345 vc.create_volume(vp, 10, data_isolated=True)
346 vc.create_volume(vp, 10, data_isolated=True)
347 vc.authorize(vp, "{guest_entity}")
348 vc.authorize(vp, "{guest_entity}")
349 vc.deauthorize(vp, "{guest_entity}")
350 vc.deauthorize(vp, "{guest_entity}")
351 vc.evict("{guest_entity}")
352 vc.evict("{guest_entity}")
353 vc.delete_volume(vp, data_isolated=True)
354 vc.delete_volume(vp, data_isolated=True)
355 vc.purge_volume(vp, data_isolated=True)
356 vc.purge_volume(vp, data_isolated=True)
357
358 vc.create_volume(vp, 10, namespace_isolated=False)
359 vc.create_volume(vp, 10, namespace_isolated=False)
360 vc.authorize(vp, "{guest_entity}")
361 vc.authorize(vp, "{guest_entity}")
362 vc.deauthorize(vp, "{guest_entity}")
363 vc.deauthorize(vp, "{guest_entity}")
364 vc.evict("{guest_entity}")
365 vc.evict("{guest_entity}")
366 vc.delete_volume(vp)
367 vc.delete_volume(vp)
368 vc.purge_volume(vp)
369 vc.purge_volume(vp)
370 """.format(
371 group_id=group_id,
372 volume_id=volume_id,
373 guest_entity=guest_entity
374 )))
375
376 def test_data_isolated(self):
377 """
378 That data isolated shares get their own pool
379
380 """
381
382 # Because the teuthology config template sets mon_max_pg_per_osd to
383 # 10000 (i.e. it just tries to ignore health warnings), reset it to something
384 # sane before using volume_client, to avoid creating pools with absurdly large
385 # numbers of PGs.
386 self.set_conf("global", "mon max pg per osd", "300")
387 for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
388 mon_daemon_state.restart()
389
390 self.mount_b.umount_wait()
391 self._configure_vc_auth(self.mount_b, "manila")
392
393 # Calculate how many PGs we'll expect the new volume pool to have
394 osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
395 max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
396 osd_count = len(osd_map['osds'])
397 max_overall = osd_count * max_per_osd
398
399 existing_pg_count = 0
400 for p in osd_map['pools']:
401 existing_pg_count += p['pg_num']
402
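# Mirror the volume client's PG-count heuristic for data-isolated pools:
# the new pool is expected to get one tenth of the cluster's remaining PG
# capacity (asserted against the created pool further down).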
403 expected_pg_num = (max_overall - existing_pg_count) / 10
404 log.info("max_per_osd {0}".format(max_per_osd))
405 log.info("osd_count {0}".format(osd_count))
406 log.info("max_overall {0}".format(max_overall))
407 log.info("existing_pg_count {0}".format(existing_pg_count))
408 log.info("expected_pg_num {0}".format(expected_pg_num))
409
410 pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
411
412 group_id = "grpid"
413 volume_id = "volid"
414 self._volume_client_python(self.mount_b, dedent("""
415 vp = VolumePath("{group_id}", "{volume_id}")
416 vc.create_volume(vp, 10, data_isolated=True)
417 """.format(
418 group_id=group_id,
419 volume_id=volume_id,
420 )))
421
422 pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
423
424 # Should have created one new pool
425 new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
426 self.assertEqual(len(new_pools), 1)
427
428 # It should have followed the heuristic for PG count
429 # (this is an overly strict test condition, so we may want to remove
430 # it at some point as/when the logic gets fancier)
431 created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
432 self.assertEqual(expected_pg_num, created_pg_num)
433
434 def test_15303(self):
435 """
436 Reproducer for #15303 "Client holds incorrect complete flag on dir
437 after losing caps" (http://tracker.ceph.com/issues/15303)
438 """
439 for m in self.mounts:
440 m.umount_wait()
441
442 # Create a dir on mount A
443 self.mount_a.mount()
444 self.mount_a.run_shell(["mkdir", "parent1"])
445 self.mount_a.run_shell(["mkdir", "parent2"])
446 self.mount_a.run_shell(["mkdir", "parent1/mydir"])
447
448 # Put some files in it from mount B
449 self.mount_b.mount()
450 self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
451 self.mount_b.umount_wait()
452
453 # List the dir's contents on mount A
454 self.assertListEqual(self.mount_a.ls("parent1/mydir"),
455 ["afile"])
456
457 def test_evict_client(self):
458 """
459 That a volume client can be evicted based on its auth ID and the volume
460 path it has mounted.
461 """
462
463 if not isinstance(self.mount_a, FuseMount):
464 self.skipTest("Requires FUSE client to inject client metadata")
465
466 # mounts[1] is used as the handle for driving VolumeClient. mounts[2]
467 # and mounts[3] are used as guests to mount the volumes/shares.
468
469 for i in range(1, 4):
470 self.mounts[i].umount_wait()
471
472 volumeclient_mount = self.mounts[1]
473 self._configure_vc_auth(volumeclient_mount, "manila")
474 guest_mounts = (self.mounts[2], self.mounts[3])
475
476 guest_entity = "guest"
477 group_id = "grpid"
478 mount_paths = []
479 volume_ids = []
480
481 # Create two volumes. Authorize 'guest' auth ID to mount the two
482 # volumes. Mount the two volumes. Write data to the volumes.
483 for i in range(2):
484 # Create volume.
485 volume_ids.append("volid_{0}".format(str(i)))
486 mount_paths.append(
487 self._volume_client_python(volumeclient_mount, dedent("""
488 vp = VolumePath("{group_id}", "{volume_id}")
489 create_result = vc.create_volume(vp, 10 * 1024 * 1024)
490 print(create_result['mount_path'])
491 """.format(
492 group_id=group_id,
493 volume_id=volume_ids[i]
494 ))))
495
496 # Authorize 'guest' auth ID to mount the volume.
497 self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
498 guest_entity, mount_paths[i])
499
500 # Mount the volume.
501 guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
502 id=guest_entity, suffix=str(i))
503 guest_mounts[i].mount(mount_path=mount_paths[i])
504 guest_mounts[i].write_n_mb("data.bin", 1)
505
506
507 # Evict the guest client, guest_mounts[0], which is using auth ID 'guest'
508 # and has mounted one volume.
509 self._volume_client_python(self.mount_b, dedent("""
510 vp = VolumePath("{group_id}", "{volume_id}")
511 vc.deauthorize(vp, "{guest_entity}")
512 vc.evict("{guest_entity}", volume_path=vp)
513 """.format(
514 group_id=group_id,
515 volume_id=volume_ids[0],
516 guest_entity=guest_entity
517 )))
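# Passing volume_path to evict() restricts the eviction to clients that use
# this auth ID *and* have this particular volume mounted, which is why
# guest_mounts[1] (same auth ID, different volume) keeps working below.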
518
519 # The evicted guest client, guest_mounts[0], should not be able to do
520 # any more metadata ops. It should start failing all operations
521 # when it sees that its own address is in the blacklist.
522 try:
523 guest_mounts[0].write_n_mb("rogue.bin", 1)
524 except CommandFailedError:
525 pass
526 else:
527 raise RuntimeError("post-eviction write should have failed!")
528
529 # The blacklisted guest client can now be unmounted and cleaned up
530 guest_mounts[0].umount_wait()
531
532 # Guest client, guest_mounts[1], using the same auth ID 'guest', but
533 # has mounted the other volume, should be able to use its volume
534 # unaffected.
535 guest_mounts[1].write_n_mb("data.bin.1", 1)
536
537 # Cleanup.
538 for i in range(2):
539 self._volume_client_python(volumeclient_mount, dedent("""
540 vp = VolumePath("{group_id}", "{volume_id}")
541 vc.deauthorize(vp, "{guest_entity}")
542 vc.delete_volume(vp)
543 vc.purge_volume(vp)
544 """.format(
545 group_id=group_id,
546 volume_id=volume_ids[i],
547 guest_entity=guest_entity
548 )))
549
550
551 def test_purge(self):
552 """
553 Reproducer for #15266, exception trying to purge volumes that
554 contain non-ascii filenames.
555
556 Additionally test any other purge corner cases here.
557 """
558 # I'm going to leave mount_b unmounted and just use it as a handle for
559 # driving volumeclient. It's a little hacky but we don't have a more
560 # general concept for librados/libcephfs clients as opposed to full
561 # blown mounting clients.
562 self.mount_b.umount_wait()
563 self._configure_vc_auth(self.mount_b, "manila")
564
565 group_id = "grpid"
566 # Use a unicode volume ID (like Manila), to reproduce #15266
567 volume_id = u"volid"
568
569 # Create
570 mount_path = self._volume_client_python(self.mount_b, dedent("""
571 vp = VolumePath("{group_id}", u"{volume_id}")
572 create_result = vc.create_volume(vp, 10)
573 print(create_result['mount_path'])
574 """.format(
575 group_id=group_id,
576 volume_id=volume_id
577 )))
578
579 # Strip leading "/"
580 mount_path = mount_path[1:]
581
582 # A file with non-ascii characters
583 self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])
584
585 # A file with no permissions to do anything
586 self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
587 self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])
588
589 self._volume_client_python(self.mount_b, dedent("""
590 vp = VolumePath("{group_id}", u"{volume_id}")
591 vc.delete_volume(vp)
592 vc.purge_volume(vp)
593 """.format(
594 group_id=group_id,
595 volume_id=volume_id
596 )))
597
598 # Check it's really gone
599 self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
600 self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])
601
602 def test_readonly_authorization(self):
603 """
604 That guest clients can be restricted to read-only mounts of volumes.
605 """
606
607 volumeclient_mount = self.mounts[1]
608 guest_mount = self.mounts[2]
609 volumeclient_mount.umount_wait()
610 guest_mount.umount_wait()
611
612 # Configure volumeclient_mount as the handle for driving volumeclient.
613 self._configure_vc_auth(volumeclient_mount, "manila")
614
615 guest_entity = "guest"
616 group_id = "grpid"
617 volume_id = "volid"
618
619 # Create a volume.
620 mount_path = self._volume_client_python(volumeclient_mount, dedent("""
621 vp = VolumePath("{group_id}", "{volume_id}")
622 create_result = vc.create_volume(vp, 1024*1024*10)
623 print(create_result['mount_path'])
624 """.format(
625 group_id=group_id,
626 volume_id=volume_id,
627 )))
628
629 # Authorize and configure credentials for the guest to mount the
630 # volume with read-write access.
631 self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
632 mount_path, readonly=False)
633
634 # Mount the volume, and write to it.
635 guest_mount.mount(mount_path=mount_path)
636 guest_mount.write_n_mb("data.bin", 1)
637
638 # Change the guest auth ID's authorization to read-only mount access.
639 self._volume_client_python(volumeclient_mount, dedent("""
640 vp = VolumePath("{group_id}", "{volume_id}")
641 vc.deauthorize(vp, "{guest_entity}")
642 """.format(
643 group_id=group_id,
644 volume_id=volume_id,
645 guest_entity=guest_entity
646 )))
647 self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
648 mount_path, readonly=True)
649
650 # The effect of the change in access level to read-only is not
651 # immediate. The guest sees the change only after a remount of
652 # the volume.
653 guest_mount.umount_wait()
654 guest_mount.mount(mount_path=mount_path)
655
656 # Read existing content of the volume.
657 self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
658 # Cannot write into read-only volume.
659 with self.assertRaises(CommandFailedError):
660 guest_mount.write_n_mb("rogue.bin", 1)
661
662 def test_get_authorized_ids(self):
663 """
664 That for a volume, the authorized IDs and their access levels
665 can be obtained using CephFSVolumeClient's get_authorized_ids().
666 """
667 volumeclient_mount = self.mounts[1]
668 volumeclient_mount.umount_wait()
669
670 # Configure volumeclient_mount as the handle for driving volumeclient.
671 self._configure_vc_auth(volumeclient_mount, "manila")
672
673 group_id = "grpid"
674 volume_id = "volid"
675 guest_entity_1 = "guest1"
676 guest_entity_2 = "guest2"
677
678 log.info("print(group ID: {0})".format(group_id))
679
680 # Create a volume.
681 auths = self._volume_client_python(volumeclient_mount, dedent("""
682 vp = VolumePath("{group_id}", "{volume_id}")
683 vc.create_volume(vp, 1024*1024*10)
684 auths = vc.get_authorized_ids(vp)
685 print(auths)
686 """.format(
687 group_id=group_id,
688 volume_id=volume_id,
689 )))
690 # Check the list of authorized IDs for the volume.
691 expected_result = None
692 self.assertEqual(str(expected_result), auths)
693
694 # Allow two auth IDs access to the volume.
695 auths = self._volume_client_python(volumeclient_mount, dedent("""
696 vp = VolumePath("{group_id}", "{volume_id}")
697 vc.authorize(vp, "{guest_entity_1}", readonly=False)
698 vc.authorize(vp, "{guest_entity_2}", readonly=True)
699 auths = vc.get_authorized_ids(vp)
700 print(auths)
701 """.format(
702 group_id=group_id,
703 volume_id=volume_id,
704 guest_entity_1=guest_entity_1,
705 guest_entity_2=guest_entity_2,
706 )))
707 # Check the list of authorized IDs and their access levels.
708 if self.py_version == 'python3':
709 expected_result = [('guest1', 'rw'), ('guest2', 'r')]
710 else:
711 expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
712
713 self.assertItemsEqual(str(expected_result), auths)
714
715 # Disallow both the auth IDs' access to the volume.
716 auths = self._volume_client_python(volumeclient_mount, dedent("""
717 vp = VolumePath("{group_id}", "{volume_id}")
718 vc.deauthorize(vp, "{guest_entity_1}")
719 vc.deauthorize(vp, "{guest_entity_2}")
720 auths = vc.get_authorized_ids(vp)
721 print(auths)
722 """.format(
723 group_id=group_id,
724 volume_id=volume_id,
725 guest_entity_1=guest_entity_1,
726 guest_entity_2=guest_entity_2,
727 )))
728 # Check the list of authorized IDs for the volume.
729 expected_result = None
730 self.assertItemsEqual(str(expected_result), auths)
731
732 def test_multitenant_volumes(self):
733 """
734 That volume access can be restricted to a tenant.
735
736 That metadata used to enforce tenant isolation of
737 volumes is stored as a two-way mapping between auth
738 IDs and volumes that they're authorized to access.
739 """
740 volumeclient_mount = self.mounts[1]
741 volumeclient_mount.umount_wait()
742
743 # Configure volumeclient_mount as the handle for driving volumeclient.
744 self._configure_vc_auth(volumeclient_mount, "manila")
745
746 group_id = "groupid"
747 volume_id = "volumeid"
748
749 # Guest clients belonging to different tenants, but using the same
750 # auth ID.
751 auth_id = "guest"
752 guestclient_1 = {
753 "auth_id": auth_id,
754 "tenant_id": "tenant1",
755 }
756 guestclient_2 = {
757 "auth_id": auth_id,
758 "tenant_id": "tenant2",
759 }
760
761 # Create a volume.
762 self._volume_client_python(volumeclient_mount, dedent("""
763 vp = VolumePath("{group_id}", "{volume_id}")
764 vc.create_volume(vp, 1024*1024*10)
765 """.format(
766 group_id=group_id,
767 volume_id=volume_id,
768 )))
769
770 # Check that volume metadata file is created on volume creation.
771 vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
772 self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
773
774 # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
775 # 'tenant1', with 'rw' access to the volume.
776 self._volume_client_python(volumeclient_mount, dedent("""
777 vp = VolumePath("{group_id}", "{volume_id}")
778 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
779 """.format(
780 group_id=group_id,
781 volume_id=volume_id,
782 auth_id=guestclient_1["auth_id"],
783 tenant_id=guestclient_1["tenant_id"]
784 )))
785
786 # Check that the auth metadata file for auth ID 'guest' is
787 # created on authorizing 'guest' access to the volume.
788 auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
789 self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
790
791 # Verify that the auth metadata file stores the tenant ID that the
792 # auth ID belongs to, the auth ID's authorized access levels
793 # for different volumes, versioning details, etc.
794 expected_auth_metadata = {
795 "version": 2,
796 "compat_version": 1,
797 "dirty": False,
798 "tenant_id": "tenant1",
799 "volumes": {
800 "groupid/volumeid": {
801 "dirty": False,
802 "access_level": "rw"
803 }
804 }
805 }
806
807 auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
808 import json
809 vp = VolumePath("{group_id}", "{volume_id}")
810 auth_metadata = vc._auth_metadata_get("{auth_id}")
811 print(json.dumps(auth_metadata))
812 """.format(
813 group_id=group_id,
814 volume_id=volume_id,
815 auth_id=guestclient_1["auth_id"],
816 )))
817 auth_metadata = json.loads(auth_metadata)
818
819 self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
820 del expected_auth_metadata["version"]
821 del auth_metadata["version"]
822 self.assertEqual(expected_auth_metadata, auth_metadata)
823
824 # Verify that the volume metadata file stores info about auth IDs
825 # and their access levels to the volume, versioning details, etc.
826 expected_vol_metadata = {
827 "version": 2,
828 "compat_version": 1,
829 "auths": {
830 "guest": {
831 "dirty": False,
832 "access_level": "rw"
833 }
834 }
835 }
836
837 vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
838 import json
839 vp = VolumePath("{group_id}", "{volume_id}")
840 volume_metadata = vc._volume_metadata_get(vp)
841 print(json.dumps(volume_metadata))
842 """.format(
843 group_id=group_id,
844 volume_id=volume_id,
845 )))
846 vol_metadata = json.loads(vol_metadata)
847
848 self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
849 del expected_vol_metadata["version"]
850 del vol_metadata["version"]
851 self.assertEqual(expected_vol_metadata, vol_metadata)
852
853 # Cannot authorize 'guestclient_2' to access the volume.
854 # It uses auth ID 'guest', which has already been used by
855 # 'guestclient_1', belonging to another tenant, for accessing
856 # the volume.
857 with self.assertRaises(CommandFailedError):
858 self._volume_client_python(volumeclient_mount, dedent("""
859 vp = VolumePath("{group_id}", "{volume_id}")
860 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
861 """.format(
862 group_id=group_id,
863 volume_id=volume_id,
864 auth_id=guestclient_2["auth_id"],
865 tenant_id=guestclient_2["tenant_id"]
866 )))
867
868 # Check that auth metadata file is cleaned up on removing
869 # auth ID's only access to a volume.
870 self._volume_client_python(volumeclient_mount, dedent("""
871 vp = VolumePath("{group_id}", "{volume_id}")
872 vc.deauthorize(vp, "{guest_entity}")
873 """.format(
874 group_id=group_id,
875 volume_id=volume_id,
876 guest_entity=guestclient_1["auth_id"]
877 )))
878
879 self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
880
881 # Check that volume metadata file is cleaned up on volume deletion.
882 self._volume_client_python(volumeclient_mount, dedent("""
883 vp = VolumePath("{group_id}", "{volume_id}")
884 vc.delete_volume(vp)
885 """.format(
886 group_id=group_id,
887 volume_id=volume_id,
888 )))
889 self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
890
891 def test_recover_metadata(self):
892 """
893 That volume client can recover from partial auth updates using
894 metadata files, which store auth info and its update status info.
895 """
896 volumeclient_mount = self.mounts[1]
897 volumeclient_mount.umount_wait()
898
899 # Configure volumeclient_mount as the handle for driving volumeclient.
900 self._configure_vc_auth(volumeclient_mount, "manila")
901
902 group_id = "groupid"
903 volume_id = "volumeid"
904
905 guestclient = {
906 "auth_id": "guest",
907 "tenant_id": "tenant",
908 }
909
910 # Create a volume.
911 self._volume_client_python(volumeclient_mount, dedent("""
912 vp = VolumePath("{group_id}", "{volume_id}")
913 vc.create_volume(vp, 1024*1024*10)
914 """.format(
915 group_id=group_id,
916 volume_id=volume_id,
917 )))
918
919 # Authorize 'guestclient' access to the volume.
920 self._volume_client_python(volumeclient_mount, dedent("""
921 vp = VolumePath("{group_id}", "{volume_id}")
922 vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
923 """.format(
924 group_id=group_id,
925 volume_id=volume_id,
926 auth_id=guestclient["auth_id"],
927 tenant_id=guestclient["tenant_id"]
928 )))
929
930 # Check that auth metadata file for auth ID 'guest' is created.
931 auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
932 self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))
933
934 # Induce a partial auth update state by modifying the auth metadata file,
935 # and then run the recovery procedure.
936 self._volume_client_python(volumeclient_mount, dedent("""
937 vp = VolumePath("{group_id}", "{volume_id}")
938 auth_metadata = vc._auth_metadata_get("{auth_id}")
939 auth_metadata['dirty'] = True
940 vc._auth_metadata_set("{auth_id}", auth_metadata)
941 vc.recover()
942 """.format(
943 group_id=group_id,
944 volume_id=volume_id,
945 auth_id=guestclient["auth_id"],
946 )))
947
948 def test_put_object(self):
949 vc_mount = self.mounts[1]
950 vc_mount.umount_wait()
951 self._configure_vc_auth(vc_mount, "manila")
952
953 obj_data = 'test data'
954 obj_name = 'test_vc_obj_1'
955 pool_name = self.fs.get_data_pool_names()[0]
956
957 self._volume_client_python(vc_mount, dedent("""
958 vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
959 """.format(
960 pool_name = pool_name,
961 obj_name = obj_name,
962 obj_data = obj_data
963 )))
964
965 read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
966 self.assertEqual(obj_data, read_data)
967
968 def test_get_object(self):
969 vc_mount = self.mounts[1]
970 vc_mount.umount_wait()
971 self._configure_vc_auth(vc_mount, "manila")
972
973 obj_data = 'test_data'
974 obj_name = 'test_vc_ob_2'
975 pool_name = self.fs.get_data_pool_names()[0]
976
977 self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
978
979 self._volume_client_python(vc_mount, dedent("""
980 data_read = vc.get_object("{pool_name}", "{obj_name}")
981 assert data_read == b"{obj_data}"
982 """.format(
983 pool_name = pool_name,
984 obj_name = obj_name,
985 obj_data = obj_data
986 )))
987
988 def test_put_object_versioned(self):
989 vc_mount = self.mounts[1]
990 vc_mount.umount_wait()
991 self._configure_vc_auth(vc_mount, "manila")
992
993 obj_data = 'test_data'
994 obj_name = 'test_vc_ob_2'
995 pool_name = self.fs.get_data_pool_names()[0]
996 self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
997
998 # Test if put_object_versioned() crosschecks the version of the
999 # given object. Being a negative test, an exception is expected.
1000 with self.assertRaises(CommandFailedError):
1001 self._volume_client_python(vc_mount, dedent("""
1002 data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")
1003 data += b'm1'
1004 vc.put_object("{pool_name}", "{obj_name}", data)
1005 data += b'm2'
1006 vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
1007 """).format(pool_name=pool_name, obj_name=obj_name))
1008
1009 def test_delete_object(self):
1010 vc_mount = self.mounts[1]
1011 vc_mount.umount_wait()
1012 self._configure_vc_auth(vc_mount, "manila")
1013
1014 obj_data = 'test data'
1015 obj_name = 'test_vc_obj_3'
1016 pool_name = self.fs.get_data_pool_names()[0]
1017
1018 self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
1019
1020 self._volume_client_python(vc_mount, dedent("""
1021 data_read = vc.delete_object("{pool_name}", "{obj_name}")
1022 """.format(
1023 pool_name = pool_name,
1024 obj_name = obj_name,
1025 )))
1026
1027 with self.assertRaises(CommandFailedError):
1028 self.fs.rados(['stat', obj_name], pool=pool_name)
1029
1030 # Check idempotency -- no error is raised when trying to delete a
1031 # non-existent object
1032 self._volume_client_python(vc_mount, dedent("""
1033 data_read = vc.delete_object("{pool_name}", "{obj_name}")
1034 """.format(
1035 pool_name = pool_name,
1036 obj_name = obj_name,
1037 )))
1038
1039 def test_21501(self):
1040 """
1041 Reproducer for #21501 "ceph_volume_client: sets invalid caps for
1042 existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
1043 """
1044
1045 vc_mount = self.mounts[1]
1046 vc_mount.umount_wait()
1047
1048 # Configure vc_mount as the handle for driving volumeclient
1049 self._configure_vc_auth(vc_mount, "manila")
1050
1051 # Create a volume
1052 group_id = "grpid"
1053 volume_id = "volid"
1054 mount_path = self._volume_client_python(vc_mount, dedent("""
1055 vp = VolumePath("{group_id}", "{volume_id}")
1056 create_result = vc.create_volume(vp, 1024*1024*10)
1057 print(create_result['mount_path'])
1058 """.format(
1059 group_id=group_id,
1060 volume_id=volume_id
1061 )))
1062
1063 # Create an auth ID with no caps
1064 guest_id = '21501'
1065 self.fs.mon_manager.raw_cluster_cmd_result(
1066 'auth', 'get-or-create', 'client.{0}'.format(guest_id))
1067
1068 guest_mount = self.mounts[2]
1069 guest_mount.umount_wait()
1070
1071 # Set auth caps for the auth ID using the volumeclient
1072 self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)
1073
1074 # Mount the volume in the guest using the auth ID to assert that the
1075 # auth caps are valid
1076 guest_mount.mount(mount_path=mount_path)
1077
1078 def test_volume_without_namespace_isolation(self):
1079 """
1080 That volume client can create volumes that do not have separate RADOS
1081 namespace layouts.
1082 """
1083 vc_mount = self.mounts[1]
1084 vc_mount.umount_wait()
1085
1086 # Configure vc_mount as the handle for driving volumeclient
1087 self._configure_vc_auth(vc_mount, "manila")
1088
1089 # Create a volume
1090 volume_prefix = "/myprefix"
1091 group_id = "grpid"
1092 volume_id = "volid"
1093 mount_path = self._volume_client_python(vc_mount, dedent("""
1094 vp = VolumePath("{group_id}", "{volume_id}")
1095 create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
1096 print(create_result['mount_path'])
1097 """.format(
1098 group_id=group_id,
1099 volume_id=volume_id
1100 )), volume_prefix)
1101
1102 # The CephFS volume should be created
1103 self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
1104 vol_namespace = self.mounts[0].getfattr(
1105 os.path.join("myprefix", group_id, volume_id),
1106 "ceph.dir.layout.pool_namespace")
1107 assert not vol_namespace
1108
1109 self._volume_client_python(vc_mount, dedent("""
1110 vp = VolumePath("{group_id}", "{volume_id}")
1111 vc.delete_volume(vp)
1112 vc.purge_volume(vp)
1113 """.format(
1114 group_id=group_id,
1115 volume_id=volume_id,
1116 )), volume_prefix)