2 """
3 Test our tools for recovering the content of damaged journals
4 """
5
6 import json
7 import logging
8 from textwrap import dedent
9 import time
10
11 from teuthology.exceptions import CommandFailedError, ConnectionLostError
12 from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO
13 from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
14 from tasks.workunit import task as workunit
15
16 log = logging.getLogger(__name__)
17
18
19 class TestJournalRepair(CephFSTestCase):
20 MDSS_REQUIRED = 2
21
    def test_inject_to_empty(self):
        """
        That when some dentries are in the journal but nothing is in
        the backing store, we correctly populate the backing store
        from the journalled dentries.
        """

        # Inject metadata operations
        self.mount_a.run_shell(["touch", "rootfile"])
        self.mount_a.run_shell(["mkdir", "subdir"])
        self.mount_a.run_shell(["touch", "subdir/subdirfile"])
        # There are several different paths for handling hardlinks, depending
        # on whether an existing dentry (being overwritten) is also a hardlink
        self.mount_a.run_shell(["mkdir", "linkdir"])

        # Test inode -> remote transition for a dentry
        self.mount_a.run_shell(["touch", "linkdir/link0"])
        self.mount_a.run_shell(["rm", "-f", "linkdir/link0"])
        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link0"])

        # Test nothing -> remote transition
        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link1"])

        # Test remote -> inode transition
        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link2"])
        self.mount_a.run_shell(["rm", "-f", "linkdir/link2"])
        self.mount_a.run_shell(["touch", "linkdir/link2"])

        # Test remote -> diff remote transition
        self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link3"])
        self.mount_a.run_shell(["rm", "-f", "linkdir/link3"])
        self.mount_a.run_shell(["ln", "rootfile", "linkdir/link3"])

        # Test an empty directory
        self.mount_a.run_shell(["mkdir", "subdir/subsubdir"])
        self.mount_a.run_shell(["sync"])

        # Before we unmount, make a note of the inode numbers, later we will
        # check that they match what we recover from the journal
        rootfile_ino = self.mount_a.path_to_ino("rootfile")
        subdir_ino = self.mount_a.path_to_ino("subdir")
        linkdir_ino = self.mount_a.path_to_ino("linkdir")
        subdirfile_ino = self.mount_a.path_to_ino("subdir/subdirfile")
        subsubdir_ino = self.mount_a.path_to_ino("subdir/subsubdir")

        self.mount_a.umount_wait()

        # Stop the MDS
        self.fs.mds_stop()
        self.fs.mds_fail()

        # Now, the journal should contain the operations, but the backing
        # store shouldn't
        with self.assertRaises(ObjectNotFound):
            self.fs.list_dirfrag(subdir_ino)
        self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])

        # Execute the dentry recovery, this should populate the backing store
        self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
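        # (journal_tool() is the test harness wrapper around the
        # cephfs-journal-tool binary, run here against rank 0 while the MDS is
        # offline; the rough CLI equivalent, assuming the default filesystem
        # name "cephfs", would be:
        #   cephfs-journal-tool --rank=cephfs:0 event recover_dentries list)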

        # Dentries in ROOT_INO are present
        self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head']))
        self.assertEqual(self.fs.list_dirfrag(subdir_ino), ['subdirfile_head', 'subsubdir_head'])
        self.assertEqual(sorted(self.fs.list_dirfrag(linkdir_ino)),
                         sorted(['link0_head', 'link1_head', 'link2_head', 'link3_head']))

        # Now check the MDS can read what we wrote: truncate the journal
        # and start the mds.
        self.fs.journal_tool(['journal', 'reset'], 0)
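        # ("journal reset" re-initialises rank 0's journal to an empty state;
        # the events we just recovered are already in the backing store, so
        # discarding them from the journal loses nothing of value.)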
        self.fs.mds_fail_restart()
        self.fs.wait_for_daemons()

        # List files
        self.mount_a.mount_wait()

        # First ls -R to populate MDCache, such that hardlinks will
        # resolve properly (recover_dentries does not create backtraces,
        # so ordinarily hardlinks to inodes that happen not to have backtraces
        # will be invisible in readdir).
        # FIXME: hook in forward scrub here to regenerate backtraces
        proc = self.mount_a.run_shell(['ls', '-R'])
        self.mount_a.umount_wait()  # remount to clear client cache before our second ls
        self.mount_a.mount_wait()

        proc = self.mount_a.run_shell(['ls', '-R'])
        self.assertEqual(proc.stdout.getvalue().strip(),
                         dedent("""
                             .:
                             linkdir
                             rootfile
                             subdir

                             ./linkdir:
                             link0
                             link1
                             link2
                             link3

                             ./subdir:
                             subdirfile
                             subsubdir

                             ./subdir/subsubdir:
                             """).strip())

        # Check the correct inos were preserved by path
        self.assertEqual(rootfile_ino, self.mount_a.path_to_ino("rootfile"))
        self.assertEqual(subdir_ino, self.mount_a.path_to_ino("subdir"))
        self.assertEqual(subdirfile_ino, self.mount_a.path_to_ino("subdir/subdirfile"))
        self.assertEqual(subsubdir_ino, self.mount_a.path_to_ino("subdir/subsubdir"))

        # Check that the hard link handling came out correctly
        self.assertEqual(self.mount_a.path_to_ino("linkdir/link0"), subdirfile_ino)
        self.assertEqual(self.mount_a.path_to_ino("linkdir/link1"), subdirfile_ino)
        self.assertNotEqual(self.mount_a.path_to_ino("linkdir/link2"), subdirfile_ino)
        self.assertEqual(self.mount_a.path_to_ino("linkdir/link3"), rootfile_ino)

        # Create a new file, ensure it is not issued the same ino as one of the
        # recovered ones
        self.mount_a.run_shell(["touch", "afterwards"])
        new_ino = self.mount_a.path_to_ino("afterwards")
        self.assertNotIn(new_ino, [rootfile_ino, subdir_ino, linkdir_ino,
                                   subdirfile_ino, subsubdir_ino])

        # Check that we can do metadata ops in the recovered directory
        self.mount_a.run_shell(["touch", "subdir/subsubdir/subsubdirfile"])

    @for_teuthology  # 308s
    def test_reset(self):
        """
        That after forcibly modifying the backing store, we can get back into
        a good state by resetting the MDSMap.

        The scenario is that we have two active MDSs and we lose the journals.  Once
        we have completely lost confidence in the integrity of the metadata, we want
        to return the system to a single-MDS state and then run a scrub to recover
        what we can.
        """

        # Set max_mds to 2
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()
        active_mds_names = self.fs.get_active_names(status=status)

        # Switch off any unneeded MDS daemons
        for unneeded_mds in set(self.mds_cluster.mds_ids) - set(active_mds_names):
            self.mds_cluster.mds_stop(unneeded_mds)
            self.mds_cluster.mds_fail(unneeded_mds)

        # Create a dir on each rank
        self.mount_a.run_shell_payload("mkdir {alpha,bravo} && touch {alpha,bravo}/file")
        self.mount_a.setfattr("alpha/", "ceph.dir.pin", "0")
        self.mount_a.setfattr("bravo/", "ceph.dir.pin", "1")
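        # (ceph.dir.pin is the virtual xattr for export pinning: it pins the
        # directory's subtree to the given MDS rank.)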

        # Ensure the pinning has taken effect and the /bravo dir is now
        # migrated to rank 1.
        self._wait_subtrees([('/bravo', 1), ('/alpha', 0)], rank=0, status=status)

        # Do some IO (this should be split across ranks according to
        # the rank-pinned dirs)
        self.mount_a.create_n_files("alpha/file", 1000)
        self.mount_a.create_n_files("bravo/file", 1000)

        # Flush the journals so that we have some backing store data
        # belonging to one MDS, and some to the other MDS.
        for mds_name in active_mds_names:
            self.fs.mds_asok(["flush", "journal"], mds_name)

        # Stop (hard) the second MDS daemon
        self.fs.mds_stop(active_mds_names[1])

        # Wipe out the tables for MDS rank 1 so that it is broken and can't start
        # (this is the simulated failure that we will demonstrate the disaster
        # recovery tools can get us back from)
        self.fs.erase_metadata_objects(prefix="mds1_")
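        # (erase_metadata_objects deletes rank 1's per-rank objects from the
        # metadata pool; the "mds1_" prefix covers objects such as
        # mds1_inotable and mds1_sessionmap.)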

        # Try to access files from the client
        blocked_ls = self.mount_a.run_shell(["ls", "-R"], wait=False)

        # Check that this "ls -R" blocked rather than completing: indicates
        # it got stuck trying to access subtrees which were on the now-dead MDS.
        log.info("Sleeping to check ls is blocked...")
        time.sleep(60)
        self.assertFalse(blocked_ls.finished)

        # This mount is now useless because it depends on MDS rank 1, and MDS rank 1
        # is not coming back.  Kill it.
        log.info("Killing mount, it's blocked on the MDS we killed")
        self.mount_a.kill()
        self.mount_a.kill_cleanup()
        try:
            # Now that the mount is dead, the ls -R should error out.
            blocked_ls.wait()
        except (CommandFailedError, ConnectionLostError):
            # The ConnectionLostError case is for the kernel client, where
            # killing the mount also means killing the node.
            pass

        # See that the second MDS will crash when it starts and tries to
        # acquire rank 1
        damaged_id = active_mds_names[1]
        self.fs.mds_restart(damaged_id)

        # The daemon taking the damaged rank should begin starting up, then
        # drop back into standby after asking the mon to mark the rank
        # damaged.
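        # (The mon records damaged ranks in the "damaged" list of the MDS map,
        # which is what we poll for below.)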
        def is_marked_damaged():
            mds_map = self.fs.get_mds_map()
            return 1 in mds_map['damaged']

        self.wait_until_true(is_marked_damaged, 60)

        def get_state():
            info = self.mds_cluster.get_mds_info(damaged_id)
            return info['state'] if info is not None else None

        self.wait_until_equal(
            get_state,
            "up:standby",
            timeout=60)

        self.fs.mds_stop(damaged_id)
        self.fs.mds_fail(damaged_id)

        # Now give up and go through a disaster recovery procedure
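        # The sequence below recovers whatever dentries survive in the two
        # journals into the backing store, wipes rank 0's session table and
        # journal, erases rank 1's objects entirely, and finally uses
        # "ceph fs reset" to shrink the filesystem back to a single rank.
        # (Rough CLI equivalents, assuming the default filesystem name
        # "cephfs":
        #   cephfs-journal-tool --rank=cephfs:0 event recover_dentries summary
        #   cephfs-journal-tool --rank=cephfs:1 event recover_dentries summary
        #   cephfs-table-tool 0 reset session
        #   cephfs-journal-tool --rank=cephfs:0 journal reset
        #   ceph fs reset cephfs --yes-i-really-mean-it)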
        self.fs.mds_stop(active_mds_names[0])
        self.fs.mds_fail(active_mds_names[0])
        # Invoke recover_dentries quietly, because otherwise the log spews millions of lines
        self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
        self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
        self.fs.table_tool(["0", "reset", "session"])
        self.fs.journal_tool(["journal", "reset"], 0)
        self.fs.erase_mds_objects(1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
                                            '--yes-i-really-mean-it')

        # Bring an MDS back online, mount a client, and see that we can walk the full
        # filesystem tree again
        self.fs.mds_fail_restart(active_mds_names[0])
        self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                              reject_fn=lambda v: len(v) > 1)
        self.mount_a.mount_wait()
        self.mount_a.run_shell(["ls", "-R"], wait=True)

    def test_table_tool(self):
        active_mdss = self.fs.get_active_names()
        self.assertEqual(len(active_mdss), 1)
        mds_name = active_mdss[0]

        self.mount_a.run_shell(["touch", "foo"])
        self.fs.mds_asok(["flush", "journal"], mds_name)

        log.info(self.fs.table_tool(["all", "show", "inode"]))
        log.info(self.fs.table_tool(["all", "show", "snap"]))
        log.info(self.fs.table_tool(["all", "show", "session"]))

        # Inode table should always be the same because initial state
        # and choice of inode are deterministic.
        # Should see one inode consumed
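        # (The free range below starts 1001 inos past the base rather than 1,
        # most likely because the MDS preallocates a batch of inos for the
        # client session -- mds_client_prealloc_inos defaults to 1000 -- in
        # addition to the one actually used for "foo".)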
        self.assertEqual(
            json.loads(self.fs.table_tool(["all", "show", "inode"])),
            {"0": {
                "data": {
                    "version": 2,
                    "inotable": {
                        "projected_free": [
                            {"start": 1099511628777,
                             "len": 1099511626775}],
                        "free": [
                            {"start": 1099511628777,
                             "len": 1099511626775}]}},
                "result": 0}}
        )

        # Should see one session
        session_data = json.loads(self.fs.table_tool(
            ["all", "show", "session"]))
        self.assertEqual(len(session_data["0"]["data"]["sessions"]), 1)
        self.assertEqual(session_data["0"]["result"], 0)

        # Should see no snaps
        self.assertEqual(
            json.loads(self.fs.table_tool(["all", "show", "snap"])),
            {"version": 1,
             "snapserver": {"last_snap": 1,
                            "last_created": 1,
                            "last_destroyed": 1,
                            "pending_noop": [],
                            "snaps": [],
                            "need_to_purge": {},
                            "pending_update": [],
                            "pending_destroy": []},
             "result": 0}
        )

        # Reset everything
        for table in ["session", "inode", "snap"]:
            self.fs.table_tool(["all", "reset", table])

        log.info(self.fs.table_tool(["all", "show", "inode"]))
        log.info(self.fs.table_tool(["all", "show", "snap"]))
        log.info(self.fs.table_tool(["all", "show", "session"]))

        # Should see 0 sessions
        session_data = json.loads(self.fs.table_tool(
            ["all", "show", "session"]))
        self.assertEqual(len(session_data["0"]["data"]["sessions"]), 0)
        self.assertEqual(session_data["0"]["result"], 0)

        # Should see entire inode range now marked free
        self.assertEqual(
            json.loads(self.fs.table_tool(["all", "show", "inode"])),
            {"0": {"data": {"version": 1,
                            "inotable": {"projected_free": [
                                {"start": 1099511627776,
                                 "len": 1099511627776}],
                                "free": [
                                    {"start": 1099511627776,
                                     "len": 1099511627776}]}},
                   "result": 0}}
        )

        # Should see no snaps
        self.assertEqual(
            json.loads(self.fs.table_tool(["all", "show", "snap"])),
            {"version": 1,
             "snapserver": {"last_snap": 1,
                            "last_created": 1,
                            "last_destroyed": 1,
                            "pending_noop": [],
                            "snaps": [],
                            "need_to_purge": {},
                            "pending_update": [],
                            "pending_destroy": []},
             "result": 0}
        )

    def test_table_tool_take_inos(self):
        initial_range_start = 1099511627776
        initial_range_len = 1099511627776
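        # (1099511627776 == 2**40, the base of the ino range that the MDS
        # inotable hands out to clients; the table initially advertises the
        # whole range as free.)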
        # Initially a completely clear range
        self.assertEqual(
            json.loads(self.fs.table_tool(["all", "show", "inode"])),
            {"0": {"data": {"version": 0,
                            "inotable": {"projected_free": [
                                {"start": initial_range_start,
                                 "len": initial_range_len}],
                                "free": [
                                    {"start": initial_range_start,
                                     "len": initial_range_len}]}},
                   "result": 0}}
        )

        # Remove some
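        # (take_inos marks every ino up to and including the given value as
        # consumed, so after taking base+100 the free range should begin at
        # base+101 and be 101 entries shorter.)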
        self.assertEqual(
            json.loads(self.fs.table_tool(["all", "take_inos", "{0}".format(initial_range_start + 100)])),
            {"0": {"data": {"version": 1,
                            "inotable": {"projected_free": [
                                {"start": initial_range_start + 101,
                                 "len": initial_range_len - 101}],
                                "free": [
                                    {"start": initial_range_start + 101,
                                     "len": initial_range_len - 101}]}},
                   "result": 0}}
        )

    @for_teuthology  # Hack: "for_teuthology" because .sh doesn't work outside teuth
    def test_journal_smoke(self):
        workunit(self.ctx, {
            'clients': {
                "client.{0}".format(self.mount_a.client_id): [
                    "fs/misc/trivial_sync.sh"],
            },
            "timeout": "1h"
        })

        for mount in self.mounts:
            mount.umount_wait()

        self.fs.mds_stop()
        self.fs.mds_fail()

        # journal tool smoke
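        # (The MDS is stopped above because cephfs-journal-tool is meant to be
        # run against an offline rank; the workunit below drives the tool via
        # the suites/cephfs_journal_tool_smoke.sh script.)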
        workunit(self.ctx, {
            'clients': {
                "client.{0}".format(self.mount_a.client_id): [
                    "suites/cephfs_journal_tool_smoke.sh"],
            },
            "timeout": "1h"
        })

        self.fs.mds_restart()
        self.fs.wait_for_daemons()

        self.mount_a.mount_wait()

        # Run trivial_sync again on mount a now that the MDS is back
        workunit(self.ctx, {
            'clients': {
                "client.{0}".format(self.mount_a.client_id): [
                    "fs/misc/trivial_sync.sh"],
            },
            "timeout": "1h"
        })