import json
import logging
import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
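        # The prefixes above end up interpolated into the remote script as
        # Python literals: a quoted string when supplied, or None when omitted.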
        return client.run_python("""
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path, vol_prefix=vol_prefix, ns_prefix=ns_prefix))

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user.

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
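        # The volume client needs broad caps: it manipulates directories,
        # layouts and quotas through the MDS, writes data through the OSDs,
        # and (for data-isolated volumes) creates pools through the mons.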
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

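        # mount_path has the form <volume_prefix>/<group_id>/<volume_id>
        # (e.g. "/volumes/grpid/volid"); peel the components off from the
        # right so they can be passed back to the volume client.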
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print auth_result['auth_key']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
        key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
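        # These are the prefixes CephFSVolumeClient is expected to fall back
        # to when none are passed in explicitly.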
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The pool namespace should be set on the volume's dir.
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check that its space usage
            # is not reported in the volume's df.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

            # sync so that file data are persisted to RADOS
            self.mounts[2].run_shell(["sync"])

            # Our data should stay in the particular RADOS namespace
            pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
            namespace = "{0}{1}".format(namespace_prefix, volume_id)
            ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
            self.assertEqual(namespace, ns_in_attr)

            objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
            self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way the client currently behaves here is to block (it acts as though it
        # has lost the network, because there is nothing to tell it that its messages
        # are being dropped because its identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLACKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

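        # Tear down: delete_volume() unlinks the volume (parking it under the
        # prefix's _deleting area) and purge_volume() then removes its data
        # for good.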
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_max_pg_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon max pg per osd", "300")
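        # Restart the mons so the lowered limit is in effect before the
        # volume client creates its pool.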
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

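        # The volume client is expected to size the new pool at about a tenth
        # of the remaining PG headroom; compute the same figure here so the
        # created pool can be checked against it below.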
        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] would be used as handle for driving VolumeClient. mounts[2]
        # and mounts[3] would be used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print create_result['mount_path']
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)


        # Evict guest_mounts[0]: it uses auth ID 'guest' and has mounted the
        # first volume, so evict that auth ID scoped to the volume's path.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client, guest_mounts[1], using the same auth ID 'guest', but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila), to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("print group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print auths
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that the volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 1,
            "dirty": False,
            "tenant_id": u"tenant1",
            "volumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": u"rw",
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            "version": 2,
            "compat_version": 1,
            "auths": {
                "guest": {
                    "dirty": False,
                    "access_level": u"rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
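        # Flipping 'dirty' to True makes the metadata look like an update that
        # was interrupted mid-flight; recover() should settle it without error.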
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

    def test_put_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_1'
        pool_name = self.fs.get_data_pool_names()[0]

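        # Write the object through the volume client, then read it back
        # directly with the rados CLI to confirm it reached the pool.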
        self._volume_client_python(vc_mount, dedent("""
            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
        self.assertEqual(obj_data, read_data)

    def test_get_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.get_object("{pool_name}", "{obj_name}")
            assert data_read == b"{obj_data}"
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

    def test_delete_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_3'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

        with self.assertRaises(CommandFailedError):
            self.fs.rados(['stat', obj_name], pool=pool_name)

        # Check idempotency -- no error raised trying to delete non-existent
        # object
        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

    def test_21501(self):
        """
        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
        """

        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Create an auth ID with no caps
        guest_id = '21501'
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'get-or-create', 'client.{0}'.format(guest_id))

        guest_mount = self.mounts[2]
        guest_mount.umount_wait()

        # Set auth caps for the auth ID using the volumeclient
        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)

        # Mount the volume in the guest using the auth ID to assert that the
        # auth caps are valid
        guest_mount.mount(mount_path=mount_path)

    def test_volume_without_namespace_isolation(self):
        """
        That volume client can create volumes that do not have separate RADOS
        namespace layouts.
        """
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        volume_prefix = "/myprefix"
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
            print create_result['mount_path']
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix)

        # The CephFS volume should be created
        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
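        # With namespace_isolated=False no pool namespace is set on the volume
        # dir, so the layout xattr should read back empty.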
        vol_namespace = self.mounts[0].getfattr(
            os.path.join("myprefix", group_id, volume_id),
            "ceph.dir.layout.pool_namespace")
        assert not vol_namespace

        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix)