from io import StringIO
import json
import logging
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


class TestVolumeClient(CephFSTestCase):
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4

    def setUp(self):
        CephFSTestCase.setUp(self)

    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
from sys import version_info as sys_version_info
from rados import OSError as rados_OSError
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path,
           vol_prefix=vol_prefix, ns_prefix=ns_prefix))

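    # A sketch of how the helper above is typically driven: the caller passes
    # a dedented payload that runs against the pre-connected 'vc' handle, and
    # anything the payload print()s comes back as the helper's return value
    # (the remote script's stdout). For example:
    #
    #   out = self._volume_client_python(self.mount_b, dedent("""
    #       vp = VolumePath("grpid", "volid")
    #       vc.create_volume(vp, 10)
    #   """))
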
    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user
        """
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        mount.client_remote.write_file(mount.get_keyring_path(),
                                       out, sudo=True)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

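    # For reference, the 'auth get-or-create' output written to the keyring
    # above is standard Ceph keyring text, roughly (key value illustrative
    # only):
    #
    #   [client.manila]
    #       key = AQBx...==
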
    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, cephfs_mntpt,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None, allow_existing_id=False):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param cephfs_mntpt: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """

        head, volume_id = os.path.split(cephfs_mntpt)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

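        # Worked example: a cephfs_mntpt of "/myprefix/grpid/volid" decomposes
        # via the successive os.path.split() calls above into
        # volume_id="volid", group_id="grpid", volume_prefix="/myprefix".
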
        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}",
                                       allow_existing_id="{allow_existing_id}")
            print(auth_result['auth_key'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id,
            allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
                "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
        key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        guest_mount.client_remote.write_file(guest_mount.get_keyring_path(),
                                             keyring_txt, sudo=True)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())

    def test_default_prefix(self):
        group_id = "grpid"
        volume_id = "volid"
        DEFAULT_VOL_PREFIX = "volumes"
        DEFAULT_NS_PREFIX = "fsvolumens_"

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        # Create a volume with the default prefix
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # The dir should be created
        self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))

        # The layout namespace should be set
        ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
        namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
        self.assertEqual(namespace, ns_in_attr)

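    # (With the defaults exercised above, the volume directory is expected at
    # volumes/grpid/volid and its objects in namespace fsvolumens_volid.)
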
    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy
        """

        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        cephfs_mntpt = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   cephfs_mntpt, namespace_prefix)
        self.mounts[2].mount_wait(cephfs_mntpt=cephfs_mntpt)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                    self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.quota.max_bytes"),
                    "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside the volume to check this space usage is
            # not reported in the volume's df.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other).  This is a >=
            # rather than a == because the global space used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

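            # (rstats are CephFS' recursive directory statistics; they
            # propagate lazily, so the metadata op above nudges the MDS into
            # refreshing rbytes before we poll it below.)
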
            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                    lambda: self.mounts[2].df()['used'],
                    data_bin_mb * 1024 * 1024, timeout=60)
            self.wait_until_equal(
                    lambda: self.mount_a.getfattr(
                        os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                        "ceph.dir.rbytes"),
                    "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # sync so that file data is persisted to RADOS
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in the particular RADOS namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace, stdout=StringIO()).stdout.getvalue().split("\n"))
        self.assertNotEqual(objects_in_ns, set())

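        # ('rados ls' scoped to the pool/namespace pair lists only objects in
        # that namespace, so a non-empty listing confirms the file data landed
        # in the volume's private namespace.)
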
        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops.
        # The way that the client currently behaves here is to block (it acts as if
        # it has lost the network, because there is nothing to tell it that its
        # messages are being dropped because its identity is gone).
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLOCKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)

    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)

            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)

            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))

    def test_data_isolated(self):
        """
        That data isolated shares get their own pool
        """

        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, data_isolated=True)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']

        # Should have created one new pool
        new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
        self.assertEqual(len(new_pools), 1)

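    # (create_volume(data_isolated=True) provisions a dedicated data pool for
    # the share; diffing the pool lists before and after detects it without
    # assuming the new pool's name.)
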
    def test_15303(self):
        """
        Reproducer for #15303 "Client holds incorrect complete flag on dir
        after losing caps" (http://tracker.ceph.com/issues/15303)
        """
        for m in self.mounts:
            m.umount_wait()

        # Create a dir on mount A
        self.mount_a.mount_wait()
        self.mount_a.run_shell(["mkdir", "parent1"])
        self.mount_a.run_shell(["mkdir", "parent2"])
        self.mount_a.run_shell(["mkdir", "parent1/mydir"])

        # Put some files in it from mount B
        self.mount_b.mount_wait()
        self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
        self.mount_b.umount_wait()

        # List the dir's contents on mount A
        self.assertListEqual(self.mount_a.ls("parent1/mydir"),
                             ["afile"])

    def test_evict_client(self):
        """
        That a volume client can be evicted based on its auth ID and the volume
        path it has mounted.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        # mounts[1] will be used as the handle for driving VolumeClient.
        # mounts[2] and mounts[3] will be used as guests to mount the
        # volumes/shares.

        for i in range(1, 4):
            self.mounts[i].umount_wait()

        volumeclient_mount = self.mounts[1]
        self._configure_vc_auth(volumeclient_mount, "manila")
        guest_mounts = (self.mounts[2], self.mounts[3])

        guest_entity = "guest"
        group_id = "grpid"
        cephfs_mntpts = []
        volume_ids = []

        # Create two volumes. Authorize 'guest' auth ID to mount the two
        # volumes. Mount the two volumes. Write data to the volumes.
        for i in range(2):
            # Create volume.
            volume_ids.append("volid_{0}".format(str(i)))
            cephfs_mntpts.append(
                self._volume_client_python(volumeclient_mount, dedent("""
                    vp = VolumePath("{group_id}", "{volume_id}")
                    create_result = vc.create_volume(vp, 10 * 1024 * 1024)
                    print(create_result['mount_path'])
                """.format(
                    group_id=group_id,
                    volume_id=volume_ids[i]
                ))))

            # Authorize 'guest' auth ID to mount the volume.
            self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
                                       guest_entity, cephfs_mntpts[i])

            # Mount the volume.
            guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
                id=guest_entity, suffix=str(i))
            guest_mounts[i].mount_wait(cephfs_mntpt=cephfs_mntpts[i])
            guest_mounts[i].write_n_mb("data.bin", 1)

        # Evict guest_mounts[0], which uses auth ID 'guest' and has mounted
        # one volume.
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}", volume_path=vp)
        """.format(
            group_id=group_id,
            volume_id=volume_ids[0],
            guest_entity=guest_entity
        )))

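        # (evict() with volume_path only blocklists sessions whose auth ID
        # *and* mounted volume match, which is why guest_mounts[1], sharing
        # the same auth ID on the other volume, must keep working below.)
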
        # The evicted guest client, guest_mounts[0], should not be able to do
        # any more metadata ops. It should start failing all operations
        # when it sees that its own address is in the blocklist.
        try:
            guest_mounts[0].write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass
        else:
            raise RuntimeError("post-eviction write should have failed!")

        # The blocklisted guest client should now be unmountable
        guest_mounts[0].umount_wait()

        # Guest client guest_mounts[1], using the same auth ID 'guest' but
        # having mounted the other volume, should be able to use its volume
        # unaffected.
        guest_mounts[1].write_n_mb("data.bin.1", 1)

        # Cleanup.
        for i in range(2):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.deauthorize(vp, "{guest_entity}")
                vc.delete_volume(vp)
                vc.purge_volume(vp)
            """.format(
                group_id=group_id,
                volume_id=volume_ids[i],
                guest_entity=guest_entity
            )))

    def test_purge(self):
        """
        Reproducer for #15266, an exception when trying to purge volumes that
        contain non-ascii filenames.

        Additionally test any other purge corner cases here.
        """
        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient.  It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        group_id = "grpid"
        # Use a unicode volume ID (like Manila) to reproduce #15266
        volume_id = u"volid"

        # Create
        cephfs_mntpt = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            create_result = vc.create_volume(vp, 10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Strip leading "/"
        cephfs_mntpt = cephfs_mntpt[1:]

        # A file with non-ascii characters
        self.mount_a.run_shell(["touch", os.path.join(cephfs_mntpt, u"b\u00F6b")])

        # A file with no permissions to do anything
        self.mount_a.run_shell(["touch", os.path.join(cephfs_mntpt, "noperms")])
        self.mount_a.run_shell(["chmod", "0000", os.path.join(cephfs_mntpt, "noperms")])

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", u"{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id
        )))

        # Check it's really gone
        self.assertEqual(self.mount_a.ls("volumes/_deleting"), [])
        self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id])

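    # (delete_volume() moves the share into volumes/_deleting and
    # purge_volume() then removes the data, which is why an empty _deleting
    # directory is the success criterion above.)
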
    def test_readonly_authorization(self):
        """
        That guest clients can be restricted to read-only mounts of volumes.
        """

        volumeclient_mount = self.mounts[1]
        guest_mount = self.mounts[2]
        volumeclient_mount.umount_wait()
        guest_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"

        # Create a volume.
        cephfs_mntpt = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*10)
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize and configure credentials for the guest to mount the
        # volume with read-write access.
        self._configure_guest_auth(volumeclient_mount, guest_mount,
                                   guest_entity, cephfs_mntpt, readonly=False)

        # Mount the volume, and write to it.
        guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt)
        guest_mount.write_n_mb("data.bin", 1)

        # Change the guest auth ID's authorization to read-only mount access.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
        self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
                                   cephfs_mntpt, readonly=True)

        # The effect of the change in access level to read-only is not
        # immediate. The guest sees the change only after a remount of
        # the volume.
        guest_mount.umount_wait()
        guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt)

        # Read existing content of the volume.
        self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
        # Cannot write into a read-only volume.
        try:
            guest_mount.write_n_mb("rogue.bin", 1)
        except CommandFailedError:
            pass

    def test_get_authorized_ids(self):
        """
        That for a volume, the authorized IDs and their access levels
        can be obtained using CephFSVolumeClient's get_authorized_ids().
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "grpid"
        volume_id = "volid"
        guest_entity_1 = "guest1"
        guest_entity_2 = "guest2"

        log.info("group ID: {0}".format(group_id))

        # Create a volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        # Check the list of authorized IDs for the volume.
        self.assertEqual('None', auths)

        # Allow two auth IDs access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{guest_entity_1}", readonly=False)
            vc.authorize(vp, "{guest_entity_2}", readonly=True)
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs and their access levels.
        expected_result = [('guest1', 'rw'), ('guest2', 'r')]
        self.assertCountEqual(str(expected_result), auths)

        # Disallow both the auth IDs' access to the volume.
        auths = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity_1}")
            vc.deauthorize(vp, "{guest_entity_2}")
            auths = vc.get_authorized_ids(vp)
            print(auths)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity_1=guest_entity_1,
            guest_entity_2=guest_entity_2,
        )))
        # Check the list of authorized IDs for the volume.
        self.assertEqual('None', auths)

    def test_multitenant_volumes(self):
        """
        That volume access can be restricted to a tenant.

        That metadata used to enforce tenant isolation of
        volumes is stored as a two-way mapping between auth
        IDs and volumes that they're authorized to access.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Guest clients belonging to different tenants, but using the same
        # auth ID.
        auth_id = "guest"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }
        guestclient_2 = {
            "auth_id": auth_id,
            "tenant_id": "tenant2",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that the volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient_1', using auth ID 'guest' and belonging to
        # 'tenant1', with 'rw' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is
        # created on authorizing 'guest' access to the volume.
        auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant1",
            "subvolumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Verify that the volume metadata file stores info about auth IDs
        # and their access levels to the volume, versioning details, etc.
        expected_vol_metadata = {
            "version": 2,
            "compat_version": 1,
            "auths": {
                "guest": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        vol_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            vp = VolumePath("{group_id}", "{volume_id}")
            volume_metadata = vc._volume_metadata_get(vp)
            print(json.dumps(volume_metadata))
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        vol_metadata = json.loads(vol_metadata)

        self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"])
        del expected_vol_metadata["version"]
        del vol_metadata["version"]
        self.assertEqual(expected_vol_metadata, vol_metadata)

        # Cannot authorize 'guestclient_2' to access the volume.
        # It uses auth ID 'guest', which has already been used by
        # 'guestclient_1', belonging to another tenant, for accessing
        # the volume.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_2["auth_id"],
                tenant_id=guestclient_2["tenant_id"]
            )))

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's only access to a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

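    # (The "$<auth_id>.meta" and "_<group>:<volume>.meta" files checked above
    # are the two halves of the two-way mapping the docstring describes: one
    # records the volumes an auth ID may access, the other the auth IDs with
    # access to a volume.)
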
    def test_authorize_auth_id_not_created_by_ceph_volume_client(self):
        """
        If the auth_id already exists and was not created by
        ceph_volume_client, then authorizing that auth_id is not
        allowed by default.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Create auth_id
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Cannot authorize 'guestclient_1' to access the volume.
        # It uses auth ID 'guest1', which already exists and was not
        # created by ceph_volume_client.
        with self.assertRaises(CommandFailedError):
            self._volume_client_python(volumeclient_mount, dedent("""
                vp = VolumePath("{group_id}", "{volume_id}")
                vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
            """.format(
                group_id=group_id,
                volume_id=volume_id,
                auth_id=guestclient_1["auth_id"],
                tenant_id=guestclient_1["tenant_id"]
            )))

        # Delete volume
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

    def test_authorize_allow_existing_id_option(self):
        """
        If the auth_id already exists and was not created by
        ceph_volume_client, then authorizing that auth_id is not
        allowed by default but is allowed with the option
        allow_existing_id.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        # Create auth_id
        self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.guest1",
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient_1' to access the volume. Its auth ID
        # 'guest1' already exists and was not created by ceph_volume_client,
        # so this is only allowed with the 'allow_existing_id' option.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}",
                         allow_existing_id="{allow_existing_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"],
            allow_existing_id=True
        )))

        # Delete volume
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

    def test_deauthorize_auth_id_after_out_of_band_update(self):
        """
        If the auth_id authorized by ceph_volume_client is updated
        out of band, the auth_id should not be deleted after a
        deauthorize. Deauthorize should only remove the caps
        associated with the volume.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        auth_id = "guest1"
        guestclient_1 = {
            "auth_id": auth_id,
            "tenant_id": "tenant1",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient_1' to access the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient_1["auth_id"],
            tenant_id=guestclient_1["tenant_id"]
        )))

        # Update caps for guestclient_1 out of band
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "caps", "client.guest1",
            "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid",
            "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid",
            "mon", "allow r",
            "mgr", "allow *"
        )

        # Deauthorize guestclient_1
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient_1["auth_id"]
        )))

        # Validate the caps of guestclient_1 after deauthorize. It should not
        # have deleted guestclient_1. The mgr and mds caps updated out of band
        # should still be present.
        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))

        self.assertEqual("client.guest1", out[0]["entity"])
        self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"])
        self.assertEqual("allow *", out[0]["caps"]["mgr"])
        self.assertNotIn("osd", out[0]["caps"])

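        # (deauthorize() removed only what it had granted for this volume: the
        # volume-specific mds path cap and the osd cap. The extra mds path and
        # the mgr cap added out of band survive, and the entity itself is not
        # deleted.)
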
        # Delete volume
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

    def test_recover_metadata(self):
        """
        That the volume client can recover from partial auth updates using
        metadata files, which store auth info and its update status info.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run the recovery procedure.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

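    # (Setting 'dirty' in the auth metadata mimics a crash mid-update;
    # recover() is expected to notice the flag and bring the metadata back to
    # a consistent state.)
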
    def test_update_old_style_auth_metadata_to_new_during_recover(self):
        """
        From nautilus onwards 'volumes' created by ceph_volume_client were
        renamed and used as CephFS subvolumes accessed via the ceph-mgr
        interface. Hence it makes sense to store the subvolume data in the
        auth-metadata file under a 'subvolumes' key instead of a 'volumes'
        key. This test validates the transparent update of the 'volumes' key
        to the 'subvolumes' key in the auth metadata file during recover.
        """
        volumeclient_mount = self.mounts[1]
        volumeclient_mount.umount_wait()

        # Configure volumeclient_mount as the handle for driving volumeclient.
        self._configure_vc_auth(volumeclient_mount, "manila")

        group_id = "groupid"
        volume_id = "volumeid"

        guestclient = {
            "auth_id": "guest",
            "tenant_id": "tenant",
        }

        # Create a volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 1024*1024*10)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))

        # Check that the volume metadata file is created on volume creation.
        vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id)
        self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

        # Authorize 'guestclient' access to the volume.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
            tenant_id=guestclient["tenant_id"]
        )))

        # Check that the auth metadata file for auth ID 'guest' is created.
        auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"])
        self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Replace 'subvolumes' with 'volumes', i.e. an old-style auth-metadata file
        self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)])

        # Verify that the auth metadata file stores the tenant ID that the
        # auth ID belongs to, the auth ID's authorized access levels
        # for different volumes, versioning details, etc.
        expected_auth_metadata = {
            "version": 2,
            "compat_version": 6,
            "dirty": False,
            "tenant_id": "tenant",
            "subvolumes": {
                "groupid/volumeid": {
                    "dirty": False,
                    "access_level": "rw"
                }
            }
        }

        # Induce partial auth update state by modifying the auth metadata file,
        # and then run the recovery procedure. This should also update the
        # 'volumes' key to 'subvolumes'.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            auth_metadata['dirty'] = True
            vc._auth_metadata_set("{auth_id}", auth_metadata)
            vc.recover()
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            auth_id=guestclient["auth_id"],
        )))

        auth_metadata = self._volume_client_python(volumeclient_mount, dedent("""
            import json
            auth_metadata = vc._auth_metadata_get("{auth_id}")
            print(json.dumps(auth_metadata))
        """.format(
            auth_id=guestclient["auth_id"],
        )))
        auth_metadata = json.loads(auth_metadata)

        self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"])
        del expected_auth_metadata["version"]
        del auth_metadata["version"]
        self.assertEqual(expected_auth_metadata, auth_metadata)

        # Check that the auth metadata file is cleaned up on removing
        # the auth ID's access to volume 'volumeid'.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guestclient["auth_id"]
        )))
        self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes"))

        # Check that the volume metadata file is cleaned up on volume deletion.
        self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )))
        self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))

1214 | def test_update_old_style_auth_metadata_to_new_during_authorize(self): | |
1215 | """ | |
1216 | From nautilus onwards 'volumes' created by ceph_volume_client were | |
1217 | renamed and used as CephFS subvolumes accessed via the ceph-mgr | |
1218 | interface. Hence it makes sense to store the subvolume data in | |
1219 | auth-metadata file with 'subvolumes' key instead of 'volumes' key. | |
1220 | This test validates the transparent update of 'volumes' key to | |
1221 | 'subvolumes' key in auth metadata file during authorize. | |
1222 | """ | |
1223 | volumeclient_mount = self.mounts[1] | |
1224 | volumeclient_mount.umount_wait() | |
1225 | ||
1226 | # Configure volumeclient_mount as the handle for driving volumeclient. | |
1227 | self._configure_vc_auth(volumeclient_mount, "manila") | |
1228 | ||
1229 | group_id = "groupid" | |
1230 | volume_id1 = "volumeid1" | |
1231 | volume_id2 = "volumeid2" | |
1232 | ||
1233 | auth_id = "guest" | |
1234 | guestclient_1 = { | |
1235 | "auth_id": auth_id, | |
1236 | "tenant_id": "tenant1", | |
1237 | } | |
1238 | ||
1239 | # Create a volume volumeid1. | |
1240 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1241 | vp = VolumePath("{group_id}", "{volume_id}") | |
1242 | create_result = vc.create_volume(vp, 10*1024*1024) | |
1243 | print(create_result['mount_path']) | |
1244 | """.format( | |
1245 | group_id=group_id, | |
1246 | volume_id=volume_id1, | |
1247 | ))) | |
1248 | ||
1249 | # Create a volume volumeid2. | |
1250 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1251 | vp = VolumePath("{group_id}", "{volume_id}") | |
1252 | create_result = vc.create_volume(vp, 10*1024*1024) | |
1253 | print(create_result['mount_path']) | |
1254 | """.format( | |
1255 | group_id=group_id, | |
1256 | volume_id=volume_id2, | |
1257 | ))) | |
1258 | ||
1259 | # Check that volume metadata file is created on volume creation. | |
1260 | vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id1) | |
1261 | self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) | |
1262 | vol_metadata_filename2 = "_{0}:{1}.meta".format(group_id, volume_id2) | |
1263 | self.assertIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) | |
1264 | ||
1265 | # Authorize 'guestclient_1', using auth ID 'guest' and belonging to | |
1266 | # 'tenant1', with 'rw' access to the volume 'volumeid1'. | |
1267 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1268 | vp = VolumePath("{group_id}", "{volume_id}") | |
1269 | vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") | |
1270 | """.format( | |
1271 | group_id=group_id, | |
1272 | volume_id=volume_id1, | |
1273 | auth_id=guestclient_1["auth_id"], | |
1274 | tenant_id=guestclient_1["tenant_id"] | |
1275 | ))) | |
1276 | ||
1277 | # Check that auth metadata file for auth ID 'guest', is | |
1278 | # created on authorizing 'guest' access to the volume. | |
1279 | auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) | |
1280 | self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) | |
1281 | ||
1282 | # Replace 'subvolumes' to 'volumes', old style auth-metadata file | |
1283 | self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) | |
1284 | ||
1285 | # Authorize 'guestclient_1', using auth ID 'guest' and belonging to | |
1286 | # 'tenant1', with 'rw' access to the volume 'volumeid2'. | |
1287 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1288 | vp = VolumePath("{group_id}", "{volume_id}") | |
1289 | vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") | |
1290 | """.format( | |
1291 | group_id=group_id, | |
1292 | volume_id=volume_id2, | |
1293 | auth_id=guestclient_1["auth_id"], | |
1294 | tenant_id=guestclient_1["tenant_id"] | |
1295 | ))) | |
1296 | ||
1297 | # Verify that the auth metadata file stores the tenant ID that the | |
1298 | # auth ID belongs to, the auth ID's authorized access levels | |
1299 | # for different volumes, versioning details, etc. | |
1300 | expected_auth_metadata = { | |
1301 | "version": 2, | |
1302 | "compat_version": 6, | |
1303 | "dirty": False, | |
1304 | "tenant_id": "tenant1", | |
1305 | "subvolumes": { | |
1306 | "groupid/volumeid1": { | |
1307 | "dirty": False, | |
1308 | "access_level": "rw" | |
1309 | }, | |
1310 | "groupid/volumeid2": { | |
1311 | "dirty": False, | |
1312 | "access_level": "rw" | |
1313 | } | |
1314 | } | |
1315 | } | |
1316 | ||
1317 | auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" | |
1318 | import json | |
1319 | auth_metadata = vc._auth_metadata_get("{auth_id}") | |
1320 | print(json.dumps(auth_metadata)) | |
1321 | """.format( | |
1322 | auth_id=guestclient_1["auth_id"], | |
1323 | ))) | |
1324 | auth_metadata = json.loads(auth_metadata) | |
1325 | ||
1326 | self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) | |
1327 | del expected_auth_metadata["version"] | |
1328 | del auth_metadata["version"] | |
1329 | self.assertEqual(expected_auth_metadata, auth_metadata) | |
1330 | ||
1331 | # Check that auth metadata file is cleaned up on removing | |
1332 | # auth ID's access to volumes 'volumeid1' and 'volumeid2'. | |
1333 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1334 | vp = VolumePath("{group_id}", "{volume_id}") | |
1335 | vc.deauthorize(vp, "{guest_entity}") | |
1336 | """.format( | |
1337 | group_id=group_id, | |
1338 | volume_id=volume_id1, | |
1339 | guest_entity=guestclient_1["auth_id"] | |
1340 | ))) | |
1341 | ||
1342 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1343 | vp = VolumePath("{group_id}", "{volume_id}") | |
1344 | vc.deauthorize(vp, "{guest_entity}") | |
1345 | """.format( | |
1346 | group_id=group_id, | |
1347 | volume_id=volume_id2, | |
1348 | guest_entity=guestclient_1["auth_id"] | |
1349 | ))) | |
1350 | self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) | |
1351 | ||
1352 | # Check that volume metadata file is cleaned up on volume deletion. | |
1353 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1354 | vp = VolumePath("{group_id}", "{volume_id}") | |
1355 | vc.delete_volume(vp) | |
1356 | """.format( | |
1357 | group_id=group_id, | |
1358 | volume_id=volume_id1, | |
1359 | ))) | |
1360 | self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) | |
1361 | ||
1362 | # Check that volume metadata file is cleaned up on 'volumeid2' deletion. | |
1363 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1364 | vp = VolumePath("{group_id}", "{volume_id}") | |
1365 | vc.delete_volume(vp) | |
1366 | """.format( | |
1367 | group_id=group_id, | |
1368 | volume_id=volume_id2, | |
1369 | ))) | |
1370 | self.assertNotIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) | |
1371 | ||
1372 | def test_update_old_style_auth_metadata_to_new_during_deauthorize(self): | |
1373 | """ | |
1374 | From Nautilus onwards, 'volumes' created by ceph_volume_client are | |
1375 | renamed and accessed as CephFS subvolumes via the ceph-mgr | |
1376 | interface. The auth-metadata file therefore stores the subvolume | |
1377 | data under a 'subvolumes' key instead of the old 'volumes' key. | |
1378 | This test validates the transparent update of the 'volumes' key to | |
1379 | 'subvolumes' in the auth metadata file during deauthorize. | |
1380 | """ | |
1381 | volumeclient_mount = self.mounts[1] | |
1382 | volumeclient_mount.umount_wait() | |
1383 | ||
1384 | # Configure volumeclient_mount as the handle for driving volumeclient. | |
1385 | self._configure_vc_auth(volumeclient_mount, "manila") | |
1386 | ||
1387 | group_id = "groupid" | |
1388 | volume_id1 = "volumeid1" | |
1389 | volume_id2 = "volumeid2" | |
1390 | ||
1391 | auth_id = "guest" | |
1392 | guestclient_1 = { | |
1393 | "auth_id": auth_id, | |
1394 | "tenant_id": "tenant1", | |
1395 | } | |
1396 | ||
1397 | # Create a volume volumeid1. | |
1398 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1399 | vp = VolumePath("{group_id}", "{volume_id}") | |
1400 | create_result = vc.create_volume(vp, 10*1024*1024) | |
1401 | print(create_result['mount_path']) | |
1402 | """.format( | |
1403 | group_id=group_id, | |
1404 | volume_id=volume_id1, | |
1405 | ))) | |
1406 | ||
1407 | # Create a volume volumeid2. | |
1408 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1409 | vp = VolumePath("{group_id}", "{volume_id}") | |
1410 | create_result = vc.create_volume(vp, 10*1024*1024) | |
1411 | print(create_result['mount_path']) | |
1412 | """.format( | |
1413 | group_id=group_id, | |
1414 | volume_id=volume_id2, | |
1415 | ))) | |
1416 | ||
1417 | # Check that volume metadata file is created on volume creation. | |
1418 | vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id1) | |
1419 | self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) | |
1420 | vol_metadata_filename2 = "_{0}:{1}.meta".format(group_id, volume_id2) | |
1421 | self.assertIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) | |
1422 | ||
1423 | # Authorize 'guestclient_1', using auth ID 'guest' and belonging to | |
1424 | # 'tenant1', with 'rw' access to the volume 'volumeid1'. | |
1425 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1426 | vp = VolumePath("{group_id}", "{volume_id}") | |
1427 | vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") | |
1428 | """.format( | |
1429 | group_id=group_id, | |
1430 | volume_id=volume_id1, | |
1431 | auth_id=guestclient_1["auth_id"], | |
1432 | tenant_id=guestclient_1["tenant_id"] | |
1433 | ))) | |
1434 | ||
1435 | # Authorize 'guestclient_1', using auth ID 'guest' and belonging to | |
1436 | # 'tenant1', with 'rw' access to the volume 'volumeid2'. | |
1437 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1438 | vp = VolumePath("{group_id}", "{volume_id}") | |
1439 | vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") | |
1440 | """.format( | |
1441 | group_id=group_id, | |
1442 | volume_id=volume_id2, | |
1443 | auth_id=guestclient_1["auth_id"], | |
1444 | tenant_id=guestclient_1["tenant_id"] | |
1445 | ))) | |
1446 | ||
1447 | # Check that auth metadata file for auth ID 'guest' is | |
1448 | # created on authorizing 'guest' access to the volume. | |
1449 | auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) | |
1450 | self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) | |
1451 | ||
1452 | # Replace 'subvolumes' with 'volumes' to simulate an old-style auth-metadata file | |
1453 | self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) | |
1454 | ||
1455 | # Deauthorize 'guestclient_1' from 'volumeid2'. This should transparently | |
1456 | # update the 'volumes' key to 'subvolumes' in the auth metadata file. | |
1457 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1458 | vp = VolumePath("{group_id}", "{volume_id}") | |
1459 | vc.deauthorize(vp, "{guest_entity}") | |
1460 | """.format( | |
1461 | group_id=group_id, | |
1462 | volume_id=volume_id2, | |
1463 | guest_entity=guestclient_1["auth_id"], | |
1464 | ))) | |
1465 | ||
1466 | # Verify that the auth metadata file stores the tenant ID that the | |
1467 | # auth ID belongs to, the auth ID's authorized access levels | |
1468 | # for different volumes, versioning details, etc. | |
1469 | expected_auth_metadata = { | |
1470 | "version": 2, | |
1471 | "compat_version": 6, | |
1472 | "dirty": False, | |
1473 | "tenant_id": "tenant1", | |
1474 | "subvolumes": { | |
1475 | "groupid/volumeid1": { | |
1476 | "dirty": False, | |
1477 | "access_level": "rw" | |
1478 | } | |
1479 | } | |
1480 | } | |
1481 | ||
1482 | auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" | |
1483 | import json | |
1484 | auth_metadata = vc._auth_metadata_get("{auth_id}") | |
1485 | print(json.dumps(auth_metadata)) | |
1486 | """.format( | |
1487 | auth_id=guestclient_1["auth_id"], | |
1488 | ))) | |
1489 | auth_metadata = json.loads(auth_metadata) | |
1490 | ||
1491 | self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) | |
1492 | del expected_auth_metadata["version"] | |
1493 | del auth_metadata["version"] | |
1494 | self.assertEqual(expected_auth_metadata, auth_metadata) | |
1495 | ||
1496 | # Check that the auth metadata file is cleaned up on removing the | |
1497 | # auth ID's access to its last volume, 'volumeid1'. | |
1498 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1499 | vp = VolumePath("{group_id}", "{volume_id}") | |
1500 | vc.deauthorize(vp, "{guest_entity}") | |
1501 | """.format( | |
1502 | group_id=group_id, | |
1503 | volume_id=volume_id1, | |
1504 | guest_entity=guestclient_1["auth_id"] | |
1505 | ))) | |
1506 | self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) | |
1507 | ||
1508 | # Check that volume metadata file is cleaned up on 'volumeid1' deletion. | |
1509 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1510 | vp = VolumePath("{group_id}", "{volume_id}") | |
1511 | vc.delete_volume(vp) | |
1512 | """.format( | |
1513 | group_id=group_id, | |
1514 | volume_id=volume_id1, | |
1515 | ))) | |
1516 | self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) | |
1517 | ||
1518 | # Check that volume metadata file is cleaned up on 'volumeid2' deletion. | |
1519 | self._volume_client_python(volumeclient_mount, dedent(""" | |
1520 | vp = VolumePath("{group_id}", "{volume_id}") | |
1521 | vc.delete_volume(vp) | |
1522 | """.format( | |
1523 | group_id=group_id, | |
1524 | volume_id=volume_id2, | |
1525 | ))) | |
1526 | self.assertNotIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) | |
1527 | ||
3efd9988 FG |
1528 | def test_put_object(self): |
1529 | vc_mount = self.mounts[1] | |
1530 | vc_mount.umount_wait() | |
1531 | self._configure_vc_auth(vc_mount, "manila") | |
1532 | ||
1533 | obj_data = 'test data' | |
1534 | obj_name = 'test_vc_obj_1' | |
1535 | pool_name = self.fs.get_data_pool_names()[0] | |
1536 | ||
1537 | self._volume_client_python(vc_mount, dedent(""" | |
1538 | vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}") | |
1539 | """.format( | |
1540 | pool_name = pool_name, | |
1541 | obj_name = obj_name, | |
1542 | obj_data = obj_data | |
1543 | ))) | |
1544 | ||
b3b6e05e | 1545 | read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name, stdout=StringIO()).stdout.getvalue() |
3efd9988 FG |
1546 | self.assertEqual(obj_data, read_data) |
1547 | ||
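# A minimal sketch of what put_object/get_object are assumed to do
# under the hood with python-rados (hypothetical wrappers, shown only
# to make the test's expectations concrete; 'cluster' is a connected
# rados.Rados handle):
import rados

def put_object_sketch(cluster, pool_name, obj_name, data):
    ioctx = cluster.open_ioctx(pool_name)
    try:
        ioctx.write_full(obj_name, data)  # replace the whole object
    finally:
        ioctx.close()

def get_object_sketch(cluster, pool_name, obj_name, max_size=1024):
    ioctx = cluster.open_ioctx(pool_name)
    try:
        return ioctx.read(obj_name, max_size)  # returns bytes
    finally:
        ioctx.close()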
1548 | def test_get_object(self): | |
1549 | vc_mount = self.mounts[1] | |
1550 | vc_mount.umount_wait() | |
1551 | self._configure_vc_auth(vc_mount, "manila") | |
1552 | ||
1553 | obj_data = 'test_data' | |
1554 | obj_name = 'test_vc_obj_2' | |
1555 | pool_name = self.fs.get_data_pool_names()[0] | |
1556 | ||
b3b6e05e | 1557 | self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin=StringIO(obj_data)) |
3efd9988 FG |
1558 | |
1559 | self._volume_client_python(vc_mount, dedent(""" | |
1560 | data_read = vc.get_object("{pool_name}", "{obj_name}") | |
1561 | assert data_read == b"{obj_data}" | |
1562 | """.format( | |
1563 | pool_name = pool_name, | |
1564 | obj_name = obj_name, | |
1565 | obj_data = obj_data | |
1566 | ))) | |
1567 | ||
91327a77 AA |
1568 | def test_put_object_versioned(self): |
1569 | vc_mount = self.mounts[1] | |
1570 | vc_mount.umount_wait() | |
1571 | self._configure_vc_auth(vc_mount, "manila") | |
1572 | ||
eafe8130 TL |
1573 | obj_data = 'test_data' |
1574 | obj_name = 'test_vc_obj' | |
1575 | pool_name = self.fs.get_data_pool_names()[0] | |
b3b6e05e | 1576 | self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin=StringIO(obj_data)) |
eafe8130 TL |
1577 | |
1578 | self._volume_client_python(vc_mount, dedent(""" | |
1579 | data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}") | |
1580 | ||
1581 | if sys_version_info.major < 3: | |
1582 | data = data + 'modification1' | |
1583 | elif sys_version_info.major > 2: | |
1584 | data = str.encode(data.decode() + 'modification1') | |
1585 | ||
1586 | vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before) | |
1587 | data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}") | |
1588 | assert version_after == version_before + 1 | |
1589 | """).format(pool_name=pool_name, obj_name=obj_name)) | |
1590 | ||
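# A minimal sketch of the compare-and-swap that put_object_versioned is
# assumed to perform via the python-rados write-op API (assumption:
# WriteOpCtx/assert_version are available, as in recent python-rados).
# Each successful write bumps the object version by one, which is why
# the test asserts version_after == version_before + 1.
import rados

def put_object_versioned_sketch(ioctx, obj_name, data, version):
    with rados.WriteOpCtx() as write_op:
        write_op.assert_version(version)  # fail if the object moved on
        write_op.write_full(data)
        ioctx.operate_write_op(write_op, obj_name)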
1591 | def test_version_check_for_put_object_versioned(self): | |
1592 | vc_mount = self.mounts[1] | |
1593 | vc_mount.umount_wait() | |
1594 | self._configure_vc_auth(vc_mount, "manila") | |
1595 | ||
91327a77 AA |
1596 | obj_data = 'test_data' |
1597 | obj_name = 'test_vc_obj_2' | |
1598 | pool_name = self.fs.get_data_pool_names()[0] | |
b3b6e05e | 1599 | self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin=StringIO(obj_data)) |
91327a77 AA |
1600 | |
1601 | # Test that put_object_versioned() cross-checks the version of the | |
1602 | # given object. Being a negative test, an exception is expected. | |
eafe8130 TL |
1603 | expected_exception = 'rados_OSError' |
1604 | output = self._volume_client_python(vc_mount, dedent(""" | |
1605 | data, version = vc.get_object_and_version("{pool_name}", "{obj_name}") | |
1606 | ||
1607 | if sys_version_info.major < 3: | |
1608 | data = data + 'm1' | |
1609 | elif sys_version_info.major > 2: | |
1610 | data = str.encode(data.decode('utf-8') + 'm1') | |
1611 | ||
1612 | vc.put_object("{pool_name}", "{obj_name}", data) | |
1613 | ||
1614 | if sys_version_info.major < 3: | |
1615 | data = data + 'm2' | |
1616 | elif sys_version_info.major > 2: | |
1617 | data = str.encode(data.decode('utf-8') + 'm2') | |
1618 | ||
1619 | try: | |
91327a77 | 1620 | vc.put_object_versioned("{pool_name}", "{obj_name}", data, version) |
eafe8130 TL |
1621 | except {expected_exception}: |
1622 | print('{expected_exception} raised') | |
1623 | """).format(pool_name=pool_name, obj_name=obj_name, | |
1624 | expected_exception=expected_exception)) | |
1625 | self.assertEqual(expected_exception + ' raised', output) | |
1626 | ||
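# Caller-side pattern implied by the negative test above -- optimistic
# concurrency with retry (a sketch; 'mutate' is a hypothetical
# bytes -> bytes callable supplied by the caller):
from rados import OSError as rados_OSError

def update_object_with_retry_sketch(vc, pool_name, obj_name, mutate, attempts=5):
    for _ in range(attempts):
        data, version = vc.get_object_and_version(pool_name, obj_name)
        try:
            vc.put_object_versioned(pool_name, obj_name, mutate(data), version)
            return True
        except rados_OSError:
            continue  # lost the race; re-read and retry
    return False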
91327a77 | 1627 | |
3efd9988 FG |
1628 | def test_delete_object(self): |
1629 | vc_mount = self.mounts[1] | |
1630 | vc_mount.umount_wait() | |
1631 | self._configure_vc_auth(vc_mount, "manila") | |
1632 | ||
1633 | obj_data = 'test data' | |
1634 | obj_name = 'test_vc_obj_3' | |
1635 | pool_name = self.fs.get_data_pool_names()[0] | |
1636 | ||
b3b6e05e | 1637 | self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin=StringIO(obj_data)) |
3efd9988 FG |
1638 | |
1639 | self._volume_client_python(vc_mount, dedent(""" | |
1640 | vc.delete_object("{pool_name}", "{obj_name}") | |
1641 | """.format( | |
1642 | pool_name = pool_name, | |
1643 | obj_name = obj_name, | |
1644 | ))) | |
1645 | ||
1646 | with self.assertRaises(CommandFailedError): | |
1647 | self.fs.rados(['stat', obj_name], pool=pool_name) | |
1648 | ||
1649 | # Check idempotency -- no error raised trying to delete non-existent | |
1650 | # object | |
1651 | self._volume_client_python(vc_mount, dedent(""" | |
1652 | vc.delete_object("{pool_name}", "{obj_name}") | |
1653 | """.format( | |
1654 | pool_name = pool_name, | |
1655 | obj_name = obj_name, | |
1656 | ))) | |
1657 | ||
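# A minimal sketch of the idempotent delete the test relies on,
# assuming delete_object swallows rados.ObjectNotFound internally:
import rados

def delete_object_sketch(ioctx, obj_name):
    try:
        ioctx.remove_object(obj_name)
    except rados.ObjectNotFound:
        pass  # already gone; deleting twice is not an error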
1658 | def test_21501(self): | |
1659 | """ | |
1660 | Reproducer for #21501 "ceph_volume_client: sets invalid caps for | |
1661 | existing IDs with no caps" (http://tracker.ceph.com/issues/21501) | |
1662 | """ | |
1663 | ||
1664 | vc_mount = self.mounts[1] | |
1665 | vc_mount.umount_wait() | |
1666 | ||
1667 | # Configure vc_mount as the handle for driving volumeclient | |
1668 | self._configure_vc_auth(vc_mount, "manila") | |
1669 | ||
1670 | # Create a volume | |
1671 | group_id = "grpid" | |
1672 | volume_id = "volid" | |
f67539c2 | 1673 | cephfs_mntpt = self._volume_client_python(vc_mount, dedent(""" |
3efd9988 FG |
1674 | vp = VolumePath("{group_id}", "{volume_id}") |
1675 | create_result = vc.create_volume(vp, 1024*1024*10) | |
91327a77 | 1676 | print(create_result['mount_path']) |
3efd9988 FG |
1677 | """.format( |
1678 | group_id=group_id, | |
1679 | volume_id=volume_id | |
1680 | ))) | |
1681 | ||
1682 | # Create an auth ID with no caps | |
1683 | guest_id = '21501' | |
1684 | self.fs.mon_manager.raw_cluster_cmd_result( | |
1685 | 'auth', 'get-or-create', 'client.{0}'.format(guest_id)) | |
1686 | ||
1687 | guest_mount = self.mounts[2] | |
1688 | guest_mount.umount_wait() | |
f67539c2 TL |
1689 | # Set auth caps for the auth ID using the volumeclient |
1690 | self._configure_guest_auth(vc_mount, guest_mount, guest_id, cephfs_mntpt, | |
f91f0fd5 | 1691 | allow_existing_id=True) |
3efd9988 FG |
1692 | |
1693 | # Mount the volume in the guest using the auth ID to assert that the | |
1694 | # auth caps are valid | |
f67539c2 | 1695 | guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt) |
28e407b8 AA |
1696 | |
1697 | def test_volume_without_namespace_isolation(self): | |
1698 | """ | |
1699 | That volume client can create volumes that do not have separate RADOS | |
1700 | namespace layouts. | |
1701 | """ | |
1702 | vc_mount = self.mounts[1] | |
1703 | vc_mount.umount_wait() | |
1704 | ||
1705 | # Configure vc_mount as the handle for driving volumeclient | |
1706 | self._configure_vc_auth(vc_mount, "manila") | |
1707 | ||
1708 | # Create a volume | |
1709 | volume_prefix = "/myprefix" | |
1710 | group_id = "grpid" | |
1711 | volume_id = "volid" | |
9f95a23c | 1712 | self._volume_client_python(vc_mount, dedent(""" |
28e407b8 AA |
1713 | vp = VolumePath("{group_id}", "{volume_id}") |
1714 | create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False) | |
91327a77 | 1715 | print(create_result['mount_path']) |
28e407b8 AA |
1716 | """.format( |
1717 | group_id=group_id, | |
1718 | volume_id=volume_id | |
1719 | )), volume_prefix) | |
1720 | ||
1721 | # The CephFS volume should be created | |
1722 | self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id)) | |
1723 | vol_namespace = self.mounts[0].getfattr( | |
1724 | os.path.join("myprefix", group_id, volume_id), | |
1725 | "ceph.dir.layout.pool_namespace") | |
1726 | assert not vol_namespace | |
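# For contrast (hypothetical check, assuming the client's default
# namespace prefix "fsvolumens_"): a namespace-isolated volume would
# instead show a non-empty layout namespace, roughly:
#   ns = self.mounts[0].getfattr(volpath, "ceph.dir.layout.pool_namespace")
#   assert ns == "fsvolumens_" + volume_id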
1727 | ||
1728 | self._volume_client_python(vc_mount, dedent(""" | |
1729 | vp = VolumePath("{group_id}", "{volume_id}") | |
1730 | vc.delete_volume(vp) | |
1731 | vc.purge_volume(vp) | |
1732 | """.format( | |
1733 | group_id=group_id, | |
1734 | volume_id=volume_id, | |
1735 | )), volume_prefix) |