import json
import logging
import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))
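    # For example, a call such as
    #     self._volume_client_python(self.mount_b, dedent("""
    #         vp = VolumePath("grpid", "volid")
    #         vc.create_volume(vp, 10)
    #     """))
    # has its payload spliced into the zero-indented template above and run on
    # the remote node; the helper returns the script's stdout, which tests
    # below capture (e.g. the printed mount_path in test_lifecycle).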

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )
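    # Note: the remote one-liner above relies on the Python 2 file() builtin,
    # matching the 'python' interpreter it is invoked with.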

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
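    # The above is the librarified equivalent of something like
    #     ceph auth get-or-create client.manila \
    #         mds 'allow *' osd 'allow rw' mon 'allow *'
    # with the resulting keyring written to the mount's keyring path and
    # referenced from the client's ceph.conf section.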

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix
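        # e.g. a mount_path of "/myprefix/grpid/volid" yields
        # volume_id="volid", group_id="grpid" and volume_prefix="/myprefix".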

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())
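    # 'client quota = True' makes ceph-fuse enforce the directory quota that
    # create_volume() sets; the df-based size checks in test_lifecycle depend
    # on that behaviour.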

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"
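        # These mirror the defaults CephFSVolumeClient applies when no
        # vol_prefix/ns_prefix is passed to its constructor.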

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The volume's pool namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is a 4MB block, so we have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >=
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # Sync so that file data is persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in a particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())
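        # (Roughly equivalent to checking that
        #  'rados -p <pool_name> --namespace <namespace> ls'
        #  returns a non-empty listing.)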

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way that the client currently behaves here is to block (it acts like
        # it has lost network, because there is nothing to tell it that its messages
        # are being dropped because its identity is gone)
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        time.sleep(10)  # Approximate check for 'stuck' as 'still running after 10s'
        self.assertFalse(background.finished)

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].kill()
        self.mounts[2].kill_cleanup()
        try:
            background.wait()
        except CommandFailedError:
            # We killed the mount out from under it
            pass

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
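        # (Orchestrators such as Manila may retry operations after a failure,
        # so every call below is made twice and must behave sanely the second
        # time around.)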
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon pg warn max per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))
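        # The new pool is expected to be sized at roughly a tenth of the
        # remaining PG headroom (max_overall - existing_pg_count), matching the
        # heuristic the volume client uses for data-isolated volumes.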

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        #  it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] is used as the handle for driving VolumeClient. mounts[2]
        # and mounts[3] are used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)


        # Evict guest_mounts[0]; it uses auth ID 'guest' and has mounted
        # the first volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))
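        # Passing volume_path to evict() is what lets the volume client
        # blacklist only the session that has this particular volume mounted,
        # leaving the other guest (same auth ID, different volume) untouched.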

        # Evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], which uses the same auth ID 'guest'
        # but has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))


    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])
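        # (Only the volume itself is deleted and purged; the now-empty group
        # directory is left in place, hence group_id still shows up above.)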

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)
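        # (readonly=True re-authorizes the same auth ID with read-only caps;
        # test_get_authorized_ids below reports this as access level 'r'
        # rather than 'rw'.)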

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
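        # Volume metadata is kept alongside the volume trees as
        # volumes/_<group_id>:<volume_id>.meta; the per-auth-ID counterpart
        # checked further below lives at volumes/$<auth_id>.meta.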

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"dirty": False,
            u"tenant_id": u"tenant1",
            u"volumes": {
                u"groupid/volumeid": {
                    u"dirty": False,
                    u"access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print auth_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))

        self.assertItemsEqual(str(expected_auth_metadata), auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            u"version": 1,
            u"compat_version": 1,
            u"auths": {
                u"guest": {
                    u"dirty": False,
                    u"access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print volume_metadata
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertItemsEqual(str(expected_vol_metadata), vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, to access
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce a partial auth update state by modifying the auth metadata
        # file, and then run the recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))
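        # recover() is expected to notice the 'dirty' flag set above and clean
        # up the partially-updated metadata; the test passes as long as it
        # completes without raising.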