import json
import logging
import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    #
    # TODO: Test that VolumeClient can recover from partial auth updates.
    #

    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
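        # The payload script is spliced into a small driver below that creates
        # a CephFSVolumeClient as the "manila" auth ID, connects, runs the
        # payload and disconnects; client.run_python() executes the result on
        # the remote node and its output is what gets returned to the caller.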
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
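        # The VolumeClient entity needs broad caps: it creates volume
        # directories and sets layouts/quotas (mds), creates pools and guest
        # auth IDs (mon), and reads and writes data and metadata objects (osd).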
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

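        # Recover the volume prefix, group ID and volume ID from the mount
        # path ("/<prefix>/<group_id>/<volume_id>") so that the matching
        # VolumePath can be reconstructed on the volumeclient side.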
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"
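        # These mirror the defaults that CephFSVolumeClient uses when no
        # volume/namespace prefix is passed to its constructor.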

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check that this space usage
            # is not reported in the volume's df.
            other_bin_mb = 6
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            self.assertGreater(self.mount_a.df()['used'],
                               (data_bin_mb + other_bin_mb) * 1024 * 1024)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # sync so that file data is persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in a particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way the client currently behaves here is to block (it acts as if it
        # has lost the network, because there is nothing to tell it that its
        # messages are being dropped now that its identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
        self.assertFalse(background.finished)

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].kill()
        self.mounts[2].kill_cleanup()
        try:
            background.wait()
        except CommandFailedError:
            # We killed the mount out from under you
            pass

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
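        # Each operation is issued twice in a row; the second call must behave
        # as a no-op rather than an error for the interface to be idempotent.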
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        :return:
        """

        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon pg warn max per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
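        # i.e. the new data-isolated pool is expected to be sized at one tenth
        # of the remaining PG headroom (the warning limit across all OSDs minus
        # the PGs already consumed by existing pools).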
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()
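        # If mount A wrongly kept the directory marked as complete after losing
        # its caps, the listing below would miss the file created via mount B.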

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] is used as the handle for driving VolumeClient; mounts[2]
        # and mounts[3] are used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
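            # Use a distinct mountpoint dir for each guest so that the two
            # mounts sharing auth ID 'guest' don't collide on the test node.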
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)


        # Evict guest_mounts[0], which is using auth ID 'guest' and has
        # mounted one of the volumes.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It behaves as if it has lost its network
        # connection.
        background = guest_mounts[0].write_n_mb("rogue.bin", 1, wait=False)
        # Approximate check for 'stuck' as 'still running after 10s'.
        time.sleep(10)
        self.assertFalse(background.finished)

        # Guest client guest_mounts[1], which uses the same auth ID 'guest' but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

        # We must hard-umount the one that we evicted
        guest_mounts[0].umount_wait(force=True)

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
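        # (delete_volume() moves the volume into volumes/_deleting and
        # purge_volume() then removes it from there, so the trash dir should
        # now be empty and only the group dir should remain under volumes/)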
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)
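        # (get_authorized_ids() returns None when no auth ID has been granted
        # access to the volume, hence the comparison against str(None) above)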

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
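        # Volume metadata is stored as a flat file at the top of the volume
        # prefix directory, named "_<group_id>:<volume_id>.meta".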

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print auth_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))

        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print volume_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, to access
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
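        # (the 'dirty' flag marks an auth update that started but never
        # completed; recover() is expected to notice it and restore the
        # metadata to a consistent state)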
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))