]> git.proxmox.com Git - ceph.git/blame - ceph/qa/tasks/cephfs/test_exports.py
import new upstream nautilus stable release 14.2.8
[ceph.git] / ceph / qa / tasks / cephfs / test_exports.py
CommitLineData
7c673cae
FG
import logging
import time

try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3

from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase

log = logging.getLogger(__name__)
class TestExports(CephFSTestCase):
    # Two active MDS ranks are required so directories can be pinned to and
    # migrated between ranks; two clients because test_session_race drives
    # both mount_a and mount_b.
    MDSS_REQUIRED = 2
    CLIENTS_REQUIRED = 2
7c673cae 13 def test_export_pin(self):
7c673cae 14 self.fs.set_max_mds(2)
224ce89b 15 self.fs.wait_for_daemons()
7c673cae
FG
16
17 status = self.fs.status()
18
19 self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
20 self._wait_subtrees(status, 0, [])
21
22 # NOP
31f18b77 23 self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
7c673cae
FG
24 self._wait_subtrees(status, 0, [])
25
26 # NOP (rank < -1)
31f18b77 27 self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
7c673cae
FG
28 self._wait_subtrees(status, 0, [])
29
30 # pin /1 to rank 1
31f18b77 31 self.mount_a.setfattr("1", "ceph.dir.pin", "1")
7c673cae
FG
32 self._wait_subtrees(status, 1, [('/1', 1)])
33
34 # Check export_targets is set properly
35 status = self.fs.status()
36 log.info(status)
37 r0 = status.get_rank(self.fs.id, 0)
38 self.assertTrue(sorted(r0['export_targets']) == [1])
39
40 # redundant pin /1/2 to rank 1
31f18b77 41 self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
7c673cae
FG
42 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
43
44 # change pin /1/2 to rank 0
31f18b77 45 self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
7c673cae
FG
46 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
47 self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
48
49 # change pin /1/2/3 to (presently) non-existent rank 2
31f18b77 50 self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
7c673cae
FG
51 self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
52 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
53
54 # change pin /1/2 back to rank 1
31f18b77 55 self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
7c673cae
FG
56 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
57
58 # add another directory pinned to 1
59 self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
31f18b77 60 self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
7c673cae
FG
61 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])
62
63 # change pin /1 to 0
31f18b77 64 self.mount_a.setfattr("1", "ceph.dir.pin", "0")
7c673cae
FG
65 self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])
66
67 # change pin /1/2 to default (-1); does the subtree root properly respect it's parent pin?
31f18b77 68 self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
7c673cae
FG
69 self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])
70
71 if len(list(status.get_standbys())):
72 self.fs.set_max_mds(3)
73 self.fs.wait_for_state('up:active', rank=2)
74 self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])
75
76 # Check export_targets is set properly
77 status = self.fs.status()
78 log.info(status)
79 r0 = status.get_rank(self.fs.id, 0)
80 self.assertTrue(sorted(r0['export_targets']) == [1,2])
81 r1 = status.get_rank(self.fs.id, 1)
82 self.assertTrue(sorted(r1['export_targets']) == [0])
83 r2 = status.get_rank(self.fs.id, 2)
84 self.assertTrue(sorted(r2['export_targets']) == [])
85
86 # Test rename
87 self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
31f18b77
FG
88 self.mount_a.setfattr("a", "ceph.dir.pin", "1")
89 self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
11fdf7f2
TL
90 if (len(self.fs.get_active_names()) > 2):
91 self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
92 else:
93 self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)])
7c673cae 94 self.mount_a.run_shell(["mv", "aa", "a/b/"])
11fdf7f2
TL
95 if (len(self.fs.get_active_names()) > 2):
96 self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
97 else:
98 self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)])
99
92f5a8d4
TL
100 def test_export_pin_getfattr(self):
101 self.fs.set_max_mds(2)
102 self.fs.wait_for_daemons()
103
104 status = self.fs.status()
105
106 self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
107 self._wait_subtrees(status, 0, [])
108
109 # pin /1 to rank 0
110 self.mount_a.setfattr("1", "ceph.dir.pin", "1")
111 self._wait_subtrees(status, 1, [('/1', 1)])
112
113 # pin /1/2 to rank 1
114 self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
115 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
116
117 # change pin /1/2 to rank 0
118 self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
119 self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
120 self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
121
122 # change pin /1/2/3 to (presently) non-existent rank 2
123 self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
124 self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
125
126 if len(list(status.get_standbys())):
127 self.fs.set_max_mds(3)
128 self.fs.wait_for_state('up:active', rank=2)
129 self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
130
131 if not isinstance(self.mount_a, FuseMount):
132 p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
133 dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
134 log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
135 if str(p.stdout.getvalue()) < "5" and not(dir_pin):
136 self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
137 self.assertTrue(self.mount_a.getfattr("1", "ceph.dir.pin") == "1")
138 self.assertTrue(self.mount_a.getfattr("1/2", "ceph.dir.pin") == "0")
11fdf7f2
TL
139 if (len(self.fs.get_active_names()) > 2):
140 self.assertTrue(self.mount_a.getfattr("1/2/3", "ceph.dir.pin") == "2")
28e407b8
AA
141
142 def test_session_race(self):
143 """
144 Test session creation race.
145
146 See: https://tracker.ceph.com/issues/24072#change-113056
147 """
148
149 self.fs.set_max_mds(2)
11fdf7f2 150 status = self.fs.wait_for_daemons()
28e407b8 151
28e407b8
AA
152 rank1 = self.fs.get_rank(rank=1, status=status)
153 name1 = 'mds.'+rank1['name']
154
155 # Create a directory that is pre-exported to rank 1
156 self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
157 self.mount_a.setfattr("a", "ceph.dir.pin", "1")
158 self._wait_subtrees(status, 1, [('/a', 1)])
159
160 # Now set the mds config to allow the race
161 self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
162
163 # Now create another directory and try to export it
164 self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
165 self.mount_b.setfattr("b", "ceph.dir.pin", "1")
166
167 time.sleep(5)
168
169 # Now turn off the race so that it doesn't wait again
170 self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)
171
172 # Now try to create a session with rank 1 by accessing a dir known to
173 # be there, if buggy, this should cause the rank 1 to crash:
174 self.mount_b.run_shell(["ls", "a"])
175
176 # Check if rank1 changed (standby tookover?)
177 new_rank1 = self.fs.get_rank(rank=1)
178 self.assertEqual(rank1['gid'], new_rank1['gid'])