# ceph/qa/tasks/cephfs/test_exports.py (ceph v12.2.7)
import logging
import time
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase

log = logging.getLogger(__name__)

class TestExports(CephFSTestCase):
    MDSS_REQUIRED = 2
    CLIENTS_REQUIRED = 2

    def _wait_subtrees(self, status, rank, test):
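        # Poll the subtree map of the given rank over its admin socket until
        # it matches the expected list of (path, auth rank) pairs, or give up
        # after the timeout.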
        timeout = 30
        pause = 2
        test = sorted(test)
        for i in range(timeout // pause):
            subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
            # keep a list (not a lazy filter) so subtrees can be iterated again below
            subtrees = [s for s in subtrees if s['dir']['path'].startswith('/')]
            filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
            log.info("%s =?= %s", filtered, test)
            if filtered == test:
                # Confirm export_pin in output is correct:
                for s in subtrees:
                    self.assertTrue(s['export_pin'] == s['auth_first'])
                return subtrees
            time.sleep(pause)
        raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank))

    def test_export_pin(self):
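        # ceph.dir.pin is the CephFS vxattr that pins a directory's subtree to
        # an MDS rank (-1 clears the pin). From a client mount the same pin can
        # be set with e.g. "setfattr -n ceph.dir.pin -v 1 <dir>".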
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])

        # NOP: -1 is the default (no pin), so the subtree map stays empty
        self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [])

        # NOP: ranks below -1 are ignored
        self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
        self._wait_subtrees(status, 0, [])

        # pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])

        # Check export_targets is set properly
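        # (export_targets in the MDS map is, roughly, the set of ranks an MDS
        #  is exporting subtrees to; rank 0 should now report rank 1)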
        status = self.fs.status()
        log.info(status)
        r0 = status.get_rank(self.fs.id, 0)
        self.assertTrue(sorted(r0['export_targets']) == [1])

        # redundant pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2
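        # (the pin is recorded on the inode but cannot take effect until
        #  rank 2 actually exists; that case is exercised further below)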
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2 back to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # add another directory pinned to 1
        self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
        self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1 to 0
        self.mount_a.setfattr("1", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1/2 to default (-1); does the subtree root properly respect its parent's pin?
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])

        if len(list(status.get_standbys())):
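            # Only exercise the rank-2 cases when a standby is available to
            # become rank 2.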
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])
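            # (the pin of /1/2/3 to rank 2, set above while rank 2 did not
            #  exist, now takes effect)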

            # Check export_targets is set properly
            status = self.fs.status()
            log.info(status)
            r0 = status.get_rank(self.fs.id, 0)
            self.assertTrue(sorted(r0['export_targets']) == [1,2])
            r1 = status.get_rank(self.fs.id, 1)
            self.assertTrue(sorted(r1['export_targets']) == [0])
            r2 = status.get_rank(self.fs.id, 2)
            self.assertTrue(sorted(r2['export_targets']) == [])

            # Test rename
            self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
            self.mount_a.setfattr("a", "ceph.dir.pin", "1")
            self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
            self.mount_a.run_shell(["mv", "aa", "a/b/"])
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
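            # The pin travels with the renamed directory: aa/bb keeps its
            # rank-0 pin at its new path under /a/b.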

    def test_session_race(self):
        """
        Test session creation race.

        See: https://tracker.ceph.com/issues/24072#change-113056
        """

        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()
        rank1 = self.fs.get_rank(rank=1, status=status)
        name1 = 'mds.' + rank1['name']

        # Create a directory that is pre-exported to rank 1
        self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/a', 1)])

        # Now set the mds config to allow the race
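        # (mds_inject_migrator_session_race is a debug knob meant to make the
        #  migrator/session-creation race from the tracker issue above
        #  reproducible)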
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)

        # Now create another directory and try to export it
        self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
        self.mount_b.setfattr("b", "ceph.dir.pin", "1")

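        # Give the export of /b to rank 1 a few seconds to get underway while
        # the race injection is active.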
        time.sleep(5)

        # Now turn off the race so that it doesn't wait again
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)

        # Now try to create a session with rank 1 by accessing a dir known to
        # be there; if buggy, this should cause rank 1 to crash:
        self.mount_b.run_shell(["ls", "a"])

        # Check whether rank 1 changed (i.e. a standby took over)
        new_rank1 = self.fs.get_rank(rank=1)
        self.assertEqual(rank1['gid'], new_rank1['gid'])