import logging
import time
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase

log = logging.getLogger(__name__)

class TestExports(CephFSTestCase):
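    """
    Tests for CephFS subtree export pinning (the ceph.dir.pin vxattr) and
    for subtree migration between MDS ranks.
    """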
    MDSS_REQUIRED = 2
    CLIENTS_REQUIRED = 2

    def test_export_pin(self):
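        """
        Pin directories to MDS ranks via the ceph.dir.pin vxattr and verify
        that each rank's subtree map and export targets follow.
        """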
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

        status = self.fs.status()

        self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
        self._wait_subtrees(status, 0, [])
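        # _wait_subtrees() (inherited from CephFSTestCase) polls the given
        # rank's subtree map until exactly the listed (path, auth rank) pairs
        # are present; an empty list means no pinned subtrees exist yet. A
        # sketch of the polling pattern is at the end of this file.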

        # NOP
        self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [])
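        # (-1 is the default "unpinned" value, so the subtree map is unchanged)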

        # NOP (rank < -1)
        self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
        self._wait_subtrees(status, 0, [])

        # pin /1 to rank 1
        self.mount_a.setfattr("1", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1)])

        # Check export_targets is set properly
        status = self.fs.status()
        log.info(status)
        r0 = status.get_rank(self.fs.id, 0)
        self.assertEqual(sorted(r0['export_targets']), [1])
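        # export_targets is, roughly, the set of other ranks a rank is
        # exporting subtrees to; it is published in the MDSMap so clients
        # know which ranks to hold sessions with. After pinning /1, rank 0
        # should target rank 1.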

        # redundant pin /1/2 to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # change pin /1/2 to rank 0
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])

        # change pin /1/2/3 to (presently) non-existent rank 2
        self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
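        # The pin to rank 2 is recorded but deferred: nothing moves until a
        # rank 2 actually exists (verified below once max_mds is raised).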

        # change pin /1/2 back to rank 1
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])

        # add another directory pinned to 1
        self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
        self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1 to 0
        self.mount_a.setfattr("1", "ceph.dir.pin", "0")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])

        # change pin /1/2 to default (-1); does the subtree root properly respect its parent pin?
        self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
        self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])
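        # With its explicit pin cleared, /1/2 stops being a separate subtree
        # and folds back into its parent /1, which is pinned to rank 0.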

        if len(list(status.get_standbys())):
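            # Raising max_mds promotes a standby to rank 2, at which point
            # the deferred pin of /1/2/3 to rank 2 finally takes effect.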
            self.fs.set_max_mds(3)
            self.fs.wait_for_state('up:active', rank=2)
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])

            # Check export_targets is set properly
            status = self.fs.status()
            log.info(status)
            r0 = status.get_rank(self.fs.id, 0)
            self.assertEqual(sorted(r0['export_targets']), [1, 2])
            r1 = status.get_rank(self.fs.id, 1)
            self.assertEqual(sorted(r1['export_targets']), [0])
            r2 = status.get_rank(self.fs.id, 2)
            self.assertEqual(sorted(r2['export_targets']), [])

        # Test rename
        self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
        if len(self.fs.get_active_names()) > 2:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
        else:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)])
        self.mount_a.run_shell(["mv", "aa", "a/b/"])
        if len(self.fs.get_active_names()) > 2:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
        else:
            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)])
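        # A pin sticks to the directory inode across rename: /aa/bb keeps
        # its rank-0 pin at its new path /a/b/aa/bb.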

        # Test getfattr
        self.assertEqual(self.mount_a.getfattr("1", "ceph.dir.pin"), "0")
        self.assertEqual(self.mount_a.getfattr("1/4", "ceph.dir.pin"), "-1")
        self.assertEqual(self.mount_a.getfattr("1/4/5", "ceph.dir.pin"), "1")
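        # getfattr reports only the explicitly set pin: 1/4 reads back -1
        # (unpinned) even though it is served by rank 0 via its ancestor /1.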
        if len(self.fs.get_active_names()) > 2:
            self.assertEqual(self.mount_a.getfattr("1/2/3", "ceph.dir.pin"), "2")

    def test_session_race(self):
        """
        Test session creation race.

        A client opening a new session with an MDS that is mid-way through
        importing a subtree can race with the migrator; on buggy versions
        this crashes the importing rank.

        See: https://tracker.ceph.com/issues/24072#change-113056
        """

        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        rank1 = self.fs.get_rank(rank=1, status=status)
        name1 = 'mds.' + rank1['name']

        # Create a directory that is pre-exported to rank 1
        self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
        self.mount_a.setfattr("a", "ceph.dir.pin", "1")
        self._wait_subtrees(status, 1, [('/a', 1)])

        # Now set the mds config to allow the race
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
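        # mds_inject_migrator_session_race is a test-only injection knob;
        # judging by the comment below ("so that it doesn't wait again"), it
        # makes rank 1 wait at the racy point in the migrator code path.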

        # Now create another directory and try to export it
        self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
        self.mount_b.setfattr("b", "ceph.dir.pin", "1")

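        # Give the export of /b time to start and reach the injected wait.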
        time.sleep(5)

        # Now turn off the race so that it doesn't wait again
        self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)

        # Now try to create a session with rank 1 by accessing a dir known to
        # be there; if buggy, this should cause rank 1 to crash:
        self.mount_b.run_shell(["ls", "a"])

        # Check that rank 1 did not change (i.e. no standby took over)
        new_rank1 = self.fs.get_rank(rank=1)
        self.assertEqual(rank1['gid'], new_rank1['gid'])
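
# For reference, a minimal sketch of the subtree-polling pattern behind the
# _wait_subtrees() helper used above (the real implementation lives in
# CephFSTestCase). The "get subtrees" asok command and the JSON field names
# below are assumptions, not a verified copy of the upstream helper:
#
#     def wait_subtrees_sketch(fs, rank, want, timeout=30, pause=2):
#         want = sorted(want)
#         for _ in range(timeout // pause):
#             # Ask the rank for its current subtree map over the admin socket
#             subtrees = fs.rank_asok(["get", "subtrees"], rank=rank)
#             got = sorted((s['dir']['path'], s['auth_first'])
#                          for s in subtrees if s['dir']['path'].startswith('/'))
#             if got == want:
#                 return
#             time.sleep(pause)
#         raise RuntimeError("timed out waiting for subtrees %s on rank %d"
#                            % (want, rank))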