import json
import logging
import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
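        """
        Run a short python payload against a CephFSVolumeClient instance
        (connected as the "manila" auth ID) on the given client, optionally
        passing custom volume and RADOS namespace prefixes.
        """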
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
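        # Stream the data over stdin into a small python helper running under
        # sudo, so the target file can be written even where root ownership or
        # permissions are required.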
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
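        # The volume client gets blanket mon and mds caps plus osd rw, so it
        # can create volumes, set layouts, manage guest auth IDs and, for
        # data-isolated volumes, create pools.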
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

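        # mount_path has the form "/<volume_prefix>/<group_id>/<volume_id>";
        # peel off each component so the volume client can be driven with the
        # same prefixes the volume was created with.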
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
        key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
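        """
        That a volume created without passing explicit prefixes to
        CephFSVolumeClient lands under the default volume prefix ("volumes")
        and gets the default RADOS namespace prefix ("fsvolumens_").
        """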
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check that its space usage
            # is not reported in the volume's df.
            other_bin_mb = 6
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            self.assertGreater(self.mount_a.df()['used'],
                               (data_bin_mb + other_bin_mb) * 1024 * 1024)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # sync so that file data is persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in a particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way the client currently behaves here is to block (it acts as if it
        # has lost the network, because there is nothing to tell it that its
        # messages are being dropped now that its identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
        self.assertFalse(background.finished)

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].kill()
        self.mounts[2].kill_cleanup()
        try:
            background.wait()
        except CommandFailedError:
            # We killed the mount out from under it
            pass

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon pg warn max per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

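        # The heuristic asserted on further down: the new pool is expected to
        # get a tenth of the PG headroom left in the cluster (the maximum
        # allowed PGs minus the PGs already used by existing pools).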
        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        #  it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] will be used as the handle for driving VolumeClient. mounts[2]
        # and mounts[3] will be used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict the guest client, guest_mounts[0], which is using auth ID
        # 'guest' and has mounted one of the volumes.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # The other guest client, guest_mounts[1], which uses the same auth ID
        # 'guest' but has mounted the other volume, should be able to use its
        # volume unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print auth_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))

        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print volume_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, to access
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))