# ceph/qa/tasks/cephfs/test_sessionmap.py
from StringIO import StringIO
import json
import logging
from unittest import SkipTest

from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.cephfs_test_case import CephFSTestCase

log = logging.getLogger(__name__)


class TestSessionMap(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 2

    def test_tell_session_drop(self):
        """
        That when a `tell` command is sent using the python CLI,
        its MDS session is gone after it terminates
        """
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        mds_id = self.fs.get_lone_mds_id()
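        # The CLI-driven `tell` opens its own client session on the MDS; once
        # the command has exited, that session should be gone again.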
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "session", "ls")

        ls_data = self.fs.mds_asok(['session', 'ls'])
        self.assertEqual(len(ls_data), 0)

    def _get_thread_count(self, mds_id):
        remote = self.fs.mds_daemons[mds_id].remote

        # `nlwp` is the number of threads in each process
        ps_txt = remote.run(
            args=["ps", "-ww", "axo", "nlwp,cmd"],
            stdout=StringIO()
        ).stdout.getvalue().strip()
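        # Drop the header row; each remaining line is "<thread count> <command>"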
        lines = ps_txt.split("\n")[1:]

        for line in lines:
            if "ceph-mds" in line and not "daemon-helper" in line:
                if line.find("-i {0}".format(mds_id)) != -1:
                    log.info("Found ps line for daemon: {0}".format(line))
                    return int(line.split()[0])

        raise RuntimeError("No process found in ps output for MDS {0}: {1}".format(
            mds_id, ps_txt
        ))

    def test_tell_conn_close(self):
        """
        That when a `tell` command is sent using the python CLI,
        the thread count goes back to where it started (i.e. we aren't
        leaving connections open)
        """
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        mds_id = self.fs.get_lone_mds_id()
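
        # Each open messenger connection holds threads in the MDS, so a stable
        # thread count before and after the `tell` implies its connection was closed.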
        initial_thread_count = self._get_thread_count(mds_id)
        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "session", "ls")
        final_thread_count = self._get_thread_count(mds_id)

        self.assertEqual(initial_thread_count, final_thread_count)

    def test_mount_conn_close(self):
        """
        That when a client unmounts, the thread count on the MDS goes back
        to what it was before the client mounted
        """
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        mds_id = self.fs.get_lone_mds_id()
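
        # Take a baseline thread count while no clients are mounted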
        initial_thread_count = self._get_thread_count(mds_id)
        self.mount_a.mount()
        self.mount_a.wait_until_mounted()
        self.assertGreater(self._get_thread_count(mds_id), initial_thread_count)
        self.mount_a.umount_wait()
        final_thread_count = self._get_thread_count(mds_id)

        self.assertEqual(initial_thread_count, final_thread_count)

    def test_version_splitting(self):
        """
        That when many sessions are updated, they are correctly
        split into multiple versions to obey mds_sessionmap_keys_per_op
        """

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # Configure MDS to write one OMAP key at once
        self.set_conf('mds', 'mds_sessionmap_keys_per_op', 1)
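        # Restart the MDS so the new setting takes effect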
        self.fs.mds_fail_restart()
        self.fs.wait_for_daemons()

        # I would like two MDSs, so that I can do an export dir later
        self.fs.set_allow_multimds(True)
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        active_mds_names = self.fs.get_active_names()
        rank_0_id = active_mds_names[0]
        rank_1_id = active_mds_names[1]
        log.info("Ranks 0 and 1 are {0} and {1}".format(
            rank_0_id, rank_1_id))

        # Bring the clients back
        self.mount_a.mount()
        self.mount_b.mount()
        self.mount_a.create_files()  # Kick the client into opening sessions
        self.mount_b.create_files()

        # See that they've got sessions
        self.assert_session_count(2, mds_id=rank_0_id)

        # See that we persist their sessions
        self.fs.mds_asok(["flush", "journal"], rank_0_id)
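        # Dump the on-disk session table for rank 0 via cephfs-table-tool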
        table_json = json.loads(self.fs.table_tool(["0", "show", "session"]))
        log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2)))
        self.assertEqual(table_json['0']['result'], 0)
        self.assertEqual(len(table_json['0']['data']['Sessions']), 2)

        # Now, induce a "force_open_sessions" event by exporting a dir
        self.mount_a.run_shell(["mkdir", "bravo"])
        self.mount_a.run_shell(["touch", "bravo/file"])
        self.mount_b.run_shell(["ls", "-l", "bravo/file"])
        def get_omap_wrs():
            return self.fs.mds_asok(['perf', 'dump', 'objecter'], rank_1_id)['objecter']['omap_wr']

        # Flush so that there are no dirty sessions on rank 1
        self.fs.mds_asok(["flush", "journal"], rank_1_id)

        # Export so that we get a force_open to rank 1 for the two sessions from rank 0
        initial_omap_wrs = get_omap_wrs()
        self.fs.mds_asok(['export', 'dir', '/bravo', '1'], rank_0_id)

        # This is the critical (if rather subtle) check: that in the process of doing an export dir,
        # we hit force_open_sessions, and as a result we end up writing out the sessionmap.  There
        # will be two sessions dirtied here, and because we have set keys_per_op to 1, we should see
        # a single session get written out (the first of the two, triggered by the second getting marked
        # dirty).
        # The number of writes is two per session, because the header (sessionmap version) update and
        # KV write both count.
        self.wait_until_true(
            lambda: get_omap_wrs() - initial_omap_wrs == 2,
            timeout=10  # Long enough for an export to get acked
        )

        # Now end our sessions and check the backing sessionmap is updated correctly
        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # In-memory sessionmap check
        self.assert_session_count(0, mds_id=rank_0_id)

        # On-disk sessionmap check
        self.fs.mds_asok(["flush", "journal"], rank_0_id)
        table_json = json.loads(self.fs.table_tool(["0", "show", "session"]))
        log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2)))
        self.assertEqual(table_json['0']['result'], 0)
        self.assertEqual(len(table_json['0']['data']['Sessions']), 0)

    def _sudo_write_file(self, remote, path, data):
        """
        Write data to a remote file as super user

        :param remote: Remote site.
        :param path: Path on the remote being written to.
        :param data: Data to be written.
        """
        # Pipe `data` over stdin to a python one-liner run with sudo on the remote
        remote.run(
            args=[
                'sudo',
                'python',
                '-c',
                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                path,
            ],
            stdin=data,
        )

    def _configure_auth(self, mount, id_name, mds_caps, osd_caps=None, mon_caps=None):
        """
        Set up auth credentials for a client mount, and write out the keyring
        for the client to use.
        """
        if osd_caps is None:
            osd_caps = "allow rw"

        if mon_caps is None:
            mon_caps = "allow r"

        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", mds_caps,
            "osd", osd_caps,
            "mon", mon_caps
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())

    def test_session_reject(self):
        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Requires FUSE client to inject client metadata")

        self.mount_a.run_shell(["mkdir", "foo"])
        self.mount_a.run_shell(["mkdir", "foo/bar"])
        self.mount_a.umount_wait()

        # Mount B will be my rejected client
        self.mount_b.umount_wait()

        # Configure a client that is limited to /foo/bar
        self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
        # Check he can mount that dir and do IO
        self.mount_b.mount(mount_path="/foo/bar")
        self.mount_b.wait_until_mounted()
        self.mount_b.create_destroy()
        self.mount_b.umount_wait()

        # Configure the client to claim that its mount point metadata is /baz
        self.set_conf("client.badguy", "client_metadata", "root=/baz")
        # Try to mount the client, see that it fails
        with self.assert_cluster_log("client session with invalid root '/baz' denied"):
            with self.assertRaises(CommandFailedError):
                self.mount_b.mount(mount_path="/foo/bar")