self.assertEqual(self.fs.list_dirfrag(ROOT_INO), [])
# Execute the dentry recovery, this should populate the backing store
- self.fs.journal_tool(['event', 'recover_dentries', 'list'])
+ self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0)
# Dentries in ROOT_INO are present
self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head']))
# Now check the MDS can read what we wrote: truncate the journal
# and start the mds.
- self.fs.journal_tool(['journal', 'reset'])
+ self.fs.journal_tool(['journal', 'reset'], 0)
self.fs.mds_fail_restart()
self.fs.wait_for_daemons()
# List files
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
# First ls -R to populate MDCache, such that hardlinks will
# resolve properly (recover_dentries does not create backtraces,
# so hardlinked inodes are only resolvable once they are already in cache)
# FIXME: hook in forward scrub here to regenerate backtraces
proc = self.mount_a.run_shell(['ls', '-R'])
self.mount_a.umount_wait() # remount to clear client cache before our second ls
- self.mount_a.mount()
- self.mount_a.wait_until_mounted()
+ self.mount_a.mount_wait()
proc = self.mount_a.run_shell(['ls', '-R'])
self.assertEqual(proc.stdout.getvalue().strip(),
self.fs.mds_stop(active_mds_names[0])
self.fs.mds_fail(active_mds_names[0])
# Invoke recover_dentries quietly, because otherwise log spews millions of lines
- self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=0, quiet=True)
- self.fs.journal_tool(["event", "recover_dentries", "summary"], rank=1, quiet=True)
+ self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True)
+ self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True)
self.fs.table_tool(["0", "reset", "session"])
- self.fs.journal_tool(["journal", "reset"], rank=0)
+ self.fs.journal_tool(["journal", "reset"], 0)
self.fs.erase_mds_objects(1)
self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
'--yes-i-really-mean-it')
self.fs.mds_fail_restart(active_mds_names[0])
self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
reject_fn=lambda v: len(v) > 1)
- self.mount_a.mount()
+ self.mount_a.mount_wait()
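# Walk the full tree to check that the recovered metadata is readable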
self.mount_a.run_shell(["ls", "-R"], wait=True)
def test_table_tool(self):
# Should see one session
session_data = json.loads(self.fs.table_tool(
["all", "show", "session"]))
- self.assertEqual(len(session_data["0"]["data"]["Sessions"]), 1)
+ self.assertEqual(len(session_data["0"]["data"]["sessions"]), 1)
self.assertEqual(session_data["0"]["result"], 0)
# Should see no snaps
self.assertEqual(
json.loads(self.fs.table_tool(["all", "show", "snap"])),
- {"version": 0,
+ {"version": 1,
"snapserver": {"last_snap": 1,
+ "last_created": 1,
+ "last_destroyed": 1,
"pending_noop": [],
"snaps": [],
"need_to_purge": {},
# Should see 0 sessions
session_data = json.loads(self.fs.table_tool(
["all", "show", "session"]))
- self.assertEqual(len(session_data["0"]["data"]["Sessions"]), 0)
+ self.assertEqual(len(session_data["0"]["data"]["sessions"]), 0)
self.assertEqual(session_data["0"]["result"], 0)
# Should see entire inode range now marked free
json.loads(self.fs.table_tool(["all", "show", "snap"])),
{"version": 1,
"snapserver": {"last_snap": 1,
+ "last_created": 1,
+ "last_destroyed": 1,
"pending_noop": [],
"snaps": [],
"need_to_purge": {},
self.fs.mds_restart()
self.fs.wait_for_daemons()
- self.mount_a.mount()
+ self.mount_a.mount_wait()
# Run a trivial sync workunit on mount a
workunit(self.ctx, {