# ceph/qa/tasks/cephfs/test_exports.py
import logging
import time

from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
# Module-level logger shared by all tests in this file.
log = logging.getLogger(__name__)
class TestExports(CephFSTestCase):
    """
    Tests for CephFS subtree export pinning (the ``ceph.dir.pin`` vxattr)
    across multiple active MDS ranks.
    """
    # NOTE(review): reconstructed from upstream — the test pins subtrees to
    # ranks 0/1 (and optionally 2) and uses a second client mount, so it needs
    # two MDS daemons and two clients. Confirm against the original file.
    MDSS_REQUIRED = 2
    CLIENTS_REQUIRED = 2

    def _wait_subtrees(self, status, rank, test):
        """
        Poll the given MDS rank's subtree map until the set of auth subtrees
        matches ``test``, a list of ``(path, auth_rank)`` tuples.

        :param status: an MDS cluster status object (from ``self.fs.status()``)
        :param rank: the MDS rank whose admin socket is queried
        :param test: expected subtrees as ``[(path, auth_first), ...]``
        :returns: the matching subtree dump on success
        :raises RuntimeError: if the state is not reached within the timeout
        """
        timeout = 30
        pause = 2
        test = sorted(test)
        # Floor division: range() requires an int (timeout/pause is a float
        # under Python 3 true division).
        for i in range(timeout // pause):
            subtrees = self.fs.mds_asok(["get", "subtrees"],
                                        mds_id=status.get_rank(self.fs.id, rank)['name'])
            # Keep only real filesystem subtrees (paths rooted at '/').
            # Use a list comprehension, not filter(): on Python 3 filter()
            # returns a one-shot iterator, which the comprehension below would
            # exhaust, leaving the export_pin validation loop with nothing.
            subtrees = [s for s in subtrees if s['dir']['path'].startswith('/')]
            filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
            log.info("%s =?= %s", filtered, test)
            if filtered == test:
                # Confirm export_pin in output is correct:
                for s in subtrees:
                    self.assertTrue(s['export_pin'] == s['auth_first'])
                return subtrees
            time.sleep(pause)
        # Apply the placeholder: the original passed `rank` as a second
        # exception argument and never formatted the message.
        raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank))

    def test_export_pin(self):
        """
        Exercise ceph.dir.pin: invalid pins are no-ops, valid pins migrate
        subtrees to the target rank, nested/redundant pins behave, and pinned
        subtrees survive a rename.
        """
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])

        # NOP: -1 means "no pin"
        self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [])

        # NOP: ranks below -1 are invalid and ignored
        self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
        self._wait_subtrees(status, 0, [])

        # Pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])

        # Check export_targets is set properly
        status = self.fs.status()
        r0 = status.get_rank(self.fs.id, 0)
        self.assertTrue(sorted(r0['export_targets']) == [1])

        # redundant pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2 back to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # add another directory pinned to 1
        self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
        self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])

        # repin /1 to rank 0; nested pins keep their own subtrees
        self.mount_a.setfattr("1", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1/2 to default (-1); does the subtree root properly respect its parent pin?
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])

        if len(list(status.get_standbys())):
            # A standby is available: bring up rank 2 so the earlier pin of
            # /1/2/3 to rank 2 can finally take effect.
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])

            # Check export_targets is set properly
            status = self.fs.status()
            r0 = status.get_rank(self.fs.id, 0)
            self.assertTrue(sorted(r0['export_targets']) == [1, 2])
            r1 = status.get_rank(self.fs.id, 1)
            self.assertTrue(sorted(r1['export_targets']) == [0])
            r2 = status.get_rank(self.fs.id, 2)
            self.assertTrue(sorted(r2['export_targets']) == [])

            # Test rename: a pinned subtree moved under another pinned tree
            # keeps its pin at the new path.
            self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
            self.mount_a.setfattr("a", "ceph.dir.pin", "1")
            self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
            self.mount_a.run_shell(["mv", "aa", "a/b/"])
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])

    def test_session_race(self):
        """
        Test session creation race.

        See: https://tracker.ceph.com/issues/24072#change-113056
        """
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()
        rank1 = self.fs.get_rank(rank=1, status=status)
        # NOTE(review): `name1` is never used in the visible code — possibly a
        # leftover; kept in case omitted lines referenced it. Confirm upstream.
        name1 = 'mds.' + rank1['name']

        # Create a directory that is pre-exported to rank 1
        self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/a', 1)])

        # Now set the mds config to allow the race
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)

        # Now create another directory and try to export it
        self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
        self.mount_b.setfattr("b", "ceph.dir.pin", "1")

        # Give the (intentionally stalled) export a moment to start.
        # NOTE(review): reconstructed — the original waited here while the
        # injected race held the migration; confirm the exact delay upstream.
        time.sleep(5)

        # Now turn off the race so that it doesn't wait again
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)

        # Now try to create a session with rank 1 by accessing a dir known to
        # be there, if buggy, this should cause the rank 1 to crash:
        self.mount_b.run_shell(["ls", "a"])

        # Check if rank1 changed (standby tookover?)
        new_rank1 = self.fs.get_rank(rank=1)
        self.assertEqual(rank1['gid'], new_rank1['gid'])