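"""
qa/tasks/cephfs/test_exports.py

Tests for CephFS export pinning: the ceph.dir.pin virtual extended
attribute, which binds a directory subtree to a specific MDS rank.
"""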
import logging
import time
from StringIO import StringIO
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase

log = logging.getLogger(__name__)

class TestExports(CephFSTestCase):
    # the tests below drive two active MDS ranks and two client mounts
    MDSS_REQUIRED = 2
    CLIENTS_REQUIRED = 2
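
    # NOTE: _wait_subtrees() comes from the shared test infrastructure
    # (cephfs_test_case in this tree). Roughly, it polls the given rank's
    # subtree map until the set of (path, auth rank) pairs matches the
    # expected list. A minimal sketch of the idea; the helper names below
    # are assumptions, not the exact implementation:
    #
    #   def _wait_subtrees(self, status, rank, want):
    #       def match():
    #           subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank)
    #           got = sorted((s['dir']['path'], s['auth_first'])
    #                        for s in subtrees
    #                        if s['dir']['path'].startswith('/'))
    #           return got == sorted(want)
    #       self.wait_until_true(match, timeout=30)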

    def test_export_pin(self):
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])
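
        # ceph.dir.pin semantics: a rank >= 0 pins the subtree to that MDS
        # rank; -1 removes the pin (inherit from the parent). Values outside
        # the valid range are ignored, as the next two no-op checks verify.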
        # no-op: -1 is already the default
        self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [])

        # no-op: rank out of range
        self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
        self._wait_subtrees(status, 0, [])

        # pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])
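
        # An MDS advertises the ranks it exports to in the export_targets
        # field of its MDSMap entry; having just moved /1 to rank 1, rank 0
        # should list rank 1 there.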
        # Check export_targets is set properly
        status = self.fs.status()
        r0 = status.get_rank(self.fs.id, 0)
        self.assertTrue(sorted(r0['export_targets']) == [1])

        # redundant pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
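
        # Pinning to a rank that does not exist (yet) must not move the
        # subtree anywhere; the pin is remembered and only takes effect
        # once rank 2 comes up (checked below, after max_mds is raised).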

        # change pin /1/2 back to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # add another directory pinned to 1
        self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
        self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1 to rank 0; children with explicit pins stay put
        self.mount_a.setfattr("1", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1/2 to default (-1); does the subtree root properly respect its parent pin?
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])

        if len(list(status.get_standbys())):
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])

            # Check export_targets is set properly
            status = self.fs.status()
            r0 = status.get_rank(self.fs.id, 0)
            self.assertTrue(sorted(r0['export_targets']) == [1, 2])
            r1 = status.get_rank(self.fs.id, 1)
            self.assertTrue(sorted(r1['export_targets']) == [0])
            r2 = status.get_rank(self.fs.id, 2)
            self.assertTrue(sorted(r2['export_targets']) == [])

        # create some plain and pinned directories, then rename across them
        self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
        if (len(self.fs.get_active_names()) > 2):
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
        else:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)])
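
        # A pin sticks to the directory inode, not to its path: after the
        # rename below, /aa/bb reappears as /a/b/aa/bb and must still be
        # pinned to rank 0.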
        self.mount_a.run_shell(["mv", "aa", "a/b/"])
        if (len(self.fs.get_active_names()) > 2):
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
        else:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)])

    def test_export_pin_getfattr(self):
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])

        # pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])

        # redundant pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        if len(list(status.get_standbys())):
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])

        if not isinstance(self.mount_a, FuseMount):
            p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
            dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
            log.debug("mount.getfattr('1', 'ceph.dir.pin'): %s" % dir_pin)
            # Kernel clients older than ~5.x cannot read ceph.dir.pin via
            # getxattr; compare the major version numerically so that
            # multi-digit majors (e.g. 10) sort correctly.
            kernel_major = int(str(p.stdout.getvalue()).split(".")[0])
            if kernel_major < 5 and not dir_pin:
                self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
        self.assertTrue(self.mount_a.getfattr("1", "ceph.dir.pin") == "1")
        self.assertTrue(self.mount_a.getfattr("1/2", "ceph.dir.pin") == "0")
        if (len(self.fs.get_active_names()) > 2):
            self.assertTrue(self.mount_a.getfattr("1/2/3", "ceph.dir.pin") == "2")

    def test_session_race(self):
        """
        Test session creation race.

        See: https://tracker.ceph.com/issues/24072#change-113056
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        rank1 = self.fs.get_rank(rank=1, status=status)

        # Create a directory that is pre-exported to rank 1
        self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/a', 1)])

        # Now set the mds config to allow the race
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
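
        # mds_inject_migrator_session_race is a debug/testing option; per the
        # tracker ticket above, it stalls the migrator at the point where a
        # newly created client session can race with an in-flight export.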

        # Now create another directory and try to export it
        self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
        self.mount_b.setfattr("b", "ceph.dir.pin", "1")

        time.sleep(5)  # brief pause so the stalled export can get underway

        # Now turn off the race so that it doesn't wait again
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)

        # Now try to create a session with rank 1 by accessing a dir known to
        # be there; if buggy, this should cause rank 1 to crash:
        self.mount_b.run_shell(["ls", "a"])

        # Check if rank 1 changed (did a standby take over?)
        new_rank1 = self.fs.get_rank(rank=1)
        self.assertEqual(rank1['gid'], new_rank1['gid'])
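
        # Each MDS daemon instance has a unique gid in the MDSMap; if rank 1
        # had crashed and a standby taken over, the gid would differ, so the
        # equality check above doubles as a crash detector.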