import json
import logging
import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
from teuthology.misc import sudo_write_file

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4
    default_py_version = 'python3'

    def setUp(self):
        CephFSTestCase.setUp(self)
        self.py_version = self.ctx.config.get('overrides', {}).\
                          get('python', TestVolumeClient.default_py_version)
        log.info("using python version: {python_version}".format(
            python_version=self.py_version
        ))

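    # Helper: run a short ceph_volume_client script on a remote client. The
    # script body executes with a connected CephFSVolumeClient available as
    # `vc`, under the python version chosen in setUp().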
    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
from sys import version_info as sys_version_info
from rados import OSError as rados_OSError
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
        """.format(payload=script, conf_path=client.config_path,
                   vol_prefix=vol_prefix, ns_prefix=ns_prefix),
        self.py_version)

    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print(auth_result['auth_key'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
        key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        sudo_write_file(guest_mount.client_remote,
                        guest_mount.get_keyring_path(), keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount
        # the volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see the volume size, same as the quota set on the volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))
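            # (volume_size is in MB, so both values above should be
            # 100 * 1024 * 1024 = 104857600 bytes.)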

            # df granularity is a 4MB block, so we have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check that this space usage
            # is not reported in the volume's df.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from the quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # Sync so that file data is persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in the particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way that the client currently behaves here is to block (it acts as if
        # it has lost its network connection, because there is nothing to tell it
        # that its messages are being dropped because its identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLACKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

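        # Finally remove the volume. delete_volume() unlinks the volume from its
        # group (it ends up under the volume prefix's "_deleting" area, which is
        # what test_purge below checks), and purge_volume() then removes the data.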
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        # Because the teuthology config template sets mon_max_pg_per_osd to
        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
        # sane before using volume_client, to avoid creating pools with absurdly large
        # numbers of PGs.
        self.set_conf("global", "mon max pg per osd", "300")
        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
            mon_daemon_state.restart()

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Calculate how many PGs we'll expect the new volume pool to have
        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
        osd_count = len(osd_map['osds'])
        max_overall = osd_count * max_per_osd

        existing_pg_count = 0
        for p in osd_map['pools']:
            existing_pg_count += p['pg_num']

        expected_pg_num = (max_overall - existing_pg_count) / 10
        log.info("max_per_osd {0}".format(max_per_osd))
        log.info("osd_count {0}".format(osd_count))
        log.info("max_overall {0}".format(max_overall))
        log.info("existing_pg_count {0}".format(existing_pg_count))
        log.info("expected_pg_num {0}".format(expected_pg_num))

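        # For example (illustrative numbers only): with 3 OSDs and
        # mon_max_pg_per_osd = 300, max_overall is 900; if the existing pools
        # already hold 180 PGs, the heuristic expects the new pool to get
        # (900 - 180) / 10 = 72 PGs.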
        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

        # It should have followed the heuristic for PG count
        # (this is an overly strict test condition, so we may want to remove
        # it at some point as/when the logic gets fancier)
        created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num")
        self.assertEqual(expected_pg_num, created_pg_num)

    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] is used as the handle for driving VolumeClient. mounts[2]
        # and mounts[3] are used as guests to mount the volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        mount_paths = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            mount_paths.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print(create_result['mount_path'])
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, mount_paths[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount(mount_path=mount_paths[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which is using auth ID 'guest' and has mounted
        # one volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blacklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blacklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], which uses the same auth ID 'guest' but
        # has mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, exception trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila does) to reproduce #15266
        volume_id = u"volid"

        # Create
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        mount_path = mount_path[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        mount_path = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount
        # the volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount(mount_path=mount_path)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   mount_path, readonly=True)
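        # (Under the hood, authorize() with readonly=True is expected to grant
        # read-only MDS/OSD caps scoped to this volume's path and RADOS
        # namespace; the exact cap strings are an implementation detail of
        # CephFSVolumeClient.)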

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount(mount_path=mount_path)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into read-only volume.
        with self.assertRaises(CommandFailedError):
            guest_mount.write_n_mb("rogue.bin", 1)

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertEqual(str(expected_result), auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        if self.py_version == 'python3':
            expected_result = [('guest1', 'rw'), ('guest2', 'r')]
        else:
            expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]

        self.assertItemsEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        expected_result = None
        self.assertItemsEqual(str(expected_result), auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that the volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 1,
            "dirty": False,
            "tenant_id": "tenant1",
            "volumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

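        # The metadata version on disk may be newer than the minimum this test
        # was written against, so check it with a lower bound and then compare
        # the rest of the structure exactly.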
        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            "version": 2,
            "compat_version": 1,
            "auths": {
                "guest": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

    def test_recover_metadata(self):
        """
        That volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

    def test_put_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_1'
        pool_name = self.fs.get_data_pool_names()[0]

        self._volume_client_python(vc_mount, dedent("""
            vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

        read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name)
        self.assertEqual(obj_data, read_data)

    def test_get_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.get_object("{pool_name}", "{obj_name}")
            assert data_read == b"{obj_data}"
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
            obj_data=obj_data
        )))

    def test_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_obj'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}")

            if sys_version_info.major < 3:
                data = data + 'modification1'
            elif sys_version_info.major >= 3:
                data = str.encode(data.decode() + 'modification1')

            vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before)
            data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}")
            assert version_after == version_before + 1
            """).format(pool_name=pool_name, obj_name=obj_name))

    def test_version_check_for_put_object_versioned(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test_data'
        obj_name = 'test_vc_ob_2'
        pool_name = self.fs.get_data_pool_names()[0]
        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        # Test if put_object_versioned() crosschecks the version of the
        # given object. Being a negative test, an exception is expected.
        expected_exception = 'rados_OSError'
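        # The intervening put_object() call below bumps the object's RADOS
        # version, so the later put_object_versioned() call that reuses the
        # stale version is expected to fail with rados_OSError.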
        output = self._volume_client_python(vc_mount, dedent("""
            data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")

            if sys_version_info.major < 3:
                data = data + 'm1'
            elif sys_version_info.major >= 3:
                data = str.encode(data.decode('utf-8') + 'm1')

            vc.put_object("{pool_name}", "{obj_name}", data)

            if sys_version_info.major < 3:
                data = data + 'm2'
            elif sys_version_info.major >= 3:
                data = str.encode(data.decode('utf-8') + 'm2')

            try:
                vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
            except {expected_exception}:
                print('{expected_exception} raised')
            """).format(pool_name=pool_name, obj_name=obj_name,
                        expected_exception=expected_exception))
        self.assertEqual(expected_exception + ' raised', output)

    def test_delete_object(self):
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()
        self._configure_vc_auth(vc_mount, "manila")

        obj_data = 'test data'
        obj_name = 'test_vc_obj_3'
        pool_name = self.fs.get_data_pool_names()[0]

        self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)

        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

        with self.assertRaises(CommandFailedError):
            self.fs.rados(['stat', obj_name], pool=pool_name)

        # Check idempotency -- no error raised trying to delete a non-existent
        # object
        self._volume_client_python(vc_mount, dedent("""
            data_read = vc.delete_object("{pool_name}", "{obj_name}")
        """.format(
            pool_name=pool_name,
            obj_name=obj_name,
        )))

    def test_21501(self):
        """
        Reproducer for #21501 "ceph_volume_client: sets invalid caps for
        existing IDs with no caps" (http://tracker.ceph.com/issues/21501)
        """

        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Create an auth ID with no caps
        guest_id = '21501'
        self.fs.mon_manager.raw_cluster_cmd_result(
            'auth', 'get-or-create', 'client.{0}'.format(guest_id))

        guest_mount = self.mounts[2]
        guest_mount.umount_wait()

        # Set auth caps for the auth ID using the volumeclient
        self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)

        # Mount the volume in the guest using the auth ID to assert that the
        # auth caps are valid
        guest_mount.mount(mount_path=mount_path)

    def test_volume_without_namespace_isolation(self):
        """
        That volume client can create volumes that do not have separate RADOS
        namespace layouts.
        """
        vc_mount = self.mounts[1]
        vc_mount.umount_wait()

        # Configure vc_mount as the handle for driving volumeclient
        self._configure_vc_auth(vc_mount, "manila")

        # Create a volume
        volume_prefix = "/myprefix"
        group_id = "grpid"
        volume_id = "volid"
        mount_path = self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )), volume_prefix)

        # The CephFS volume should be created
        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
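        # Without namespace isolation, no pool_namespace layout should be set
        # on the volume directory, so the xattr is expected to be empty.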
        vol_namespace = self.mounts[0].getfattr(
            os.path.join("myprefix", group_id, volume_id),
            "ceph.dir.layout.pool_namespace")
        assert not vol_namespace

        self._volume_client_python(vc_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix)