]> git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_exports.py
060131add70f1bda4d4e52235ecc5ef8b40197b8
[ceph.git] / ceph / qa / tasks / cephfs / test_exports.py
1 import logging
2 import time
3 from StringIO import StringIO
4 from tasks.cephfs.fuse_mount import FuseMount
5 from tasks.cephfs.cephfs_test_case import CephFSTestCase
6
7 log = logging.getLogger(__name__)
8
class TestExports(CephFSTestCase):
    """
    Exercise CephFS subtree export pinning (the ceph.dir.pin vxattr)
    across multiple active MDS ranks, plus a regression test for the
    migrator session-creation race (tracker #24072).
    """
    MDSS_REQUIRED = 2
    CLIENTS_REQUIRED = 2

    def test_export_pin(self):
        """
        Verify ceph.dir.pin semantics: invalid ranks are ignored, pins
        move subtrees to the requested rank, child pins override parent
        pins, export_targets are maintained, and renames carry pinned
        subtrees to their new paths.
        """
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])

        # NOP: -1 means "unpinned", which is already the default state
        self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [])

        # NOP (rank < -1 is invalid and must be ignored by the MDS)
        self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
        self._wait_subtrees(status, 0, [])

        # pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])

        # Check export_targets is set properly
        status = self.fs.status()
        log.info(status)
        r0 = status.get_rank(self.fs.id, 0)
        self.assertEqual(sorted(r0['export_targets']), [1])

        # redundant pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2; no
        # subtree should appear until rank 2 comes up later
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2 back to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # add another directory pinned to 1
        self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
        self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1 to 0
        self.mount_a.setfattr("1", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1/2 to default (-1); does the subtree root properly respect its parent pin?
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])

        if len(list(status.get_standbys())):
            # bring up rank 2 so the earlier pin of /1/2/3 takes effect
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])

            # Check export_targets is set properly
            status = self.fs.status()
            log.info(status)
            r0 = status.get_rank(self.fs.id, 0)
            self.assertEqual(sorted(r0['export_targets']), [1, 2])
            r1 = status.get_rank(self.fs.id, 1)
            self.assertEqual(sorted(r1['export_targets']), [0])
            r2 = status.get_rank(self.fs.id, 2)
            self.assertEqual(sorted(r2['export_targets']), [])

        # Test rename: a pinned subtree must follow its directory when it moves
        self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
        if (len(self.fs.get_active_names()) > 2):
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
        else:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)])
        self.mount_a.run_shell(["mv", "aa", "a/b/"])
        if (len(self.fs.get_active_names()) > 2):
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
        else:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)])

    def test_export_pin_getfattr(self):
        """
        Verify that reading back the ceph.dir.pin vxattr via getfattr
        returns the rank each directory was pinned to (skipped on kernel
        clients too old to expose the vxattr).
        """
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])

        # pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])

        # pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        if len(list(status.get_standbys())):
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])

        if not isinstance(self.mount_a, FuseMount):
            p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
            dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
            log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
            # Compare the numeric major version of `uname -r`: the old
            # lexicographic string comparison (`< "5"`) would wrongly
            # classify e.g. kernel "10.0" as older than "5".
            kernel_major = int(str(p.stdout.getvalue()).split(".")[0])
            if kernel_major < 5 and not dir_pin:
                self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
        self.assertEqual(self.mount_a.getfattr("1", "ceph.dir.pin"), "1")
        self.assertEqual(self.mount_a.getfattr("1/2", "ceph.dir.pin"), "0")
        if (len(self.fs.get_active_names()) > 2):
            self.assertEqual(self.mount_a.getfattr("1/2/3", "ceph.dir.pin"), "2")

    def test_session_race(self):
        """
        Test session creation race.

        See: https://tracker.ceph.com/issues/24072#change-113056
        """

        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        rank1 = self.fs.get_rank(rank=1, status=status)

        # Create a directory that is pre-exported to rank 1
        self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/a', 1)])

        # Now set the mds config to allow the race
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)

        # Now create another directory and try to export it
        self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
        self.mount_b.setfattr("b", "ceph.dir.pin", "1")

        # give the (possibly stalled) migration a chance to happen
        time.sleep(5)

        # Now turn off the race so that it doesn't wait again
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)

        # Now try to create a session with rank 1 by accessing a dir known to
        # be there, if buggy, this should cause the rank 1 to crash:
        self.mount_b.run_shell(["ls", "a"])

        # Check if rank1 changed (standby took over?) — gid must be stable,
        # i.e. the original rank 1 daemon survived the race
        new_rank1 = self.fs.get_rank(rank=1)
        self.assertEqual(rank1['gid'], new_rank1['gid'])