1 from io
import StringIO
2 from logging
import getLogger
3 from os
import getcwd
as os_getcwd
4 from os
.path
import join
5 from textwrap
import dedent
8 from tasks
.cephfs
.cephfs_test_case
import CephFSTestCase
9 from tasks
.cephfs
.fuse_mount
import FuseMount
10 from tasks
.cephfs
.kernel_mount
import KernelMount
# Module-level logger for this test module.
log = getLogger(__name__)
# TODO: add code to run non-ACL tests too.
# TODO: make xfstests-dev tests running without running `make install`.
class XFSTestsDev(CephFSTestCase):
    """Clone, build and run the xfstests-dev test suite against a CephFS
    mount (kernel or FUSE client)."""

    # Name of the directory (relative to the xfstests-dev repo root) where
    # test runs write their logs; saved away by save_results_dir().
    RESULTS_DIR = "results"
23 super(XFSTestsDev
, self
).setUp()
24 self
.setup_xfsprogs_devs()
25 self
.prepare_xfstests_devs()
    def setup_xfsprogs_devs(self):
        # Whether the xfsprogs-dev repo should also be cloned, built and
        # installed (used by get_repos()/build_and_install()); off by default.
        self.install_xfsprogs = False
    def prepare_xfstests_devs(self):
        """Get directories, users and config files ready for an xfstests run,
        then build and install the suite."""
        # NOTE: To run a quick test with vstart_runner.py, enable next line
        # and disable calls to get_repo(), install_deps(), and
        # build_and_install() and also disable lines in tearDown() for repo
        # deletion.
        #self.xfstests_repo_path = '/path/to/xfstests-dev'

        self.get_test_and_scratch_dirs_ready()
        self.create_reqd_users()
        self.write_local_config()
        self.write_ceph_exclude()
        self.build_and_install()
46 self
.del_users_and_groups()
48 super(XFSTestsDev
, self
).tearDown()
50 def del_users_and_groups(self
):
51 self
.mount_a
.client_remote
.run(args
=['sudo', 'userdel', '--force',
53 omit_sudo
=False, check_status
=False)
54 self
.mount_a
.client_remote
.run(args
=['sudo', 'userdel', '--force',
55 '--remove', '123456-fsgqa'],
56 omit_sudo
=False, check_status
=False)
57 self
.mount_a
.client_remote
.run(args
=['sudo', 'groupdel', 'fsgqa'],
58 omit_sudo
=False, check_status
=False)
61 self
.save_results_dir()
62 self
.mount_a
.client_remote
.run(args
=f
'sudo rm -rf {self.xfstests_repo_path}',
63 omit_sudo
=False, check_status
=False)
65 if self
.install_xfsprogs
:
66 self
.mount_a
.client_remote
.run(args
=f
'sudo rm -rf {self.xfsprogs_repo_path}',
67 omit_sudo
=False, check_status
=False)
69 def save_results_dir(self
):
71 When tests in xfstests-dev repo are executed, logs are created and
72 saved, under a directory named "results" that lies at the repo root.
73 In case a test from xfstests-dev repo fails, these logs will help find
74 the cause of the failure.
76 Since there's no option in teuthology to copy a directory lying at a
77 custom location in order to save it from teuthology test runner's tear
78 down, let's copy this directory to a standard location that teuthology
79 copies away before erasing all data on the test machine. The standard
80 location chosen in the case here is the Ceph log directory.
82 In case of vstart_runner.py, this methods does nothing.
84 # No need to save results dir in case of vstart_runner.py.
85 for x
in ('LocalFuseMount', 'LocalKernelMount'):
86 if x
in self
.mount_a
.__class
__.__name
__:
89 src
= join(self
.xfstests_repo_path
, self
.RESULTS_DIR
)
91 if self
.mount_a
.run_shell(f
'sudo stat {src}',
92 check_status
=False, omit_sudo
=False).returncode
!= 0:
93 log
.info(f
'xfstests-dev repo contains not directory named '
94 f
'"{self.RESULTS_DIR}". repo location: {self.xfstests_repo_path}')
97 std_loc
= '/var/log/ceph' # standard location
98 dst
= join(std_loc
, 'xfstests-dev-results')
99 self
.mount_a
.run_shell(f
'sudo mkdir -p {dst}', omit_sudo
=False)
100 self
.mount_a
.run_shell(f
'sudo cp -r {src} {dst}', omit_sudo
=False)
101 log
.info(f
'results dir from xfstests-dev has been saved; it was '
102 f
'copied from {self.xfstests_repo_path} to {std_loc}.')
104 def build_and_install(self
):
105 # NOTE: On teuthology machines it's necessary to run "make" as
106 # superuser since the repo is cloned somewhere in /tmp.
107 self
.mount_a
.client_remote
.run(args
=['sudo', 'make'],
108 cwd
=self
.xfstests_repo_path
, stdout
=StringIO(),
110 self
.mount_a
.client_remote
.run(args
=['sudo', 'make', 'install'],
111 cwd
=self
.xfstests_repo_path
, omit_sudo
=False,
112 stdout
=StringIO(), stderr
=StringIO())
114 if self
.install_xfsprogs
:
115 self
.mount_a
.client_remote
.run(args
=['sudo', 'make'],
116 cwd
=self
.xfsprogs_repo_path
,
117 stdout
=StringIO(), stderr
=StringIO())
118 self
.mount_a
.client_remote
.run(args
=['sudo', 'make', 'install'],
119 cwd
=self
.xfsprogs_repo_path
, omit_sudo
=False,
120 stdout
=StringIO(), stderr
=StringIO())
124 Clone xfstests_dev and xfsprogs-dev repositories. If already present,
125 update them. The xfsprogs-dev will be used to test the encrypt.
127 # TODO: make sure that repo is not cloned for every test. it should
129 remoteurl
= 'https://git.ceph.com/xfstests-dev.git'
130 self
.xfstests_repo_path
= self
.mount_a
.client_remote
.mkdtemp(suffix
=
132 self
.mount_a
.run_shell(['git', 'clone', remoteurl
, '--depth', '1',
133 self
.xfstests_repo_path
])
135 if self
.install_xfsprogs
:
136 remoteurl
= 'https://git.ceph.com/xfsprogs-dev.git'
137 self
.xfsprogs_repo_path
= self
.mount_a
.client_remote
.mkdtemp(suffix
=
139 self
.mount_a
.run_shell(['git', 'clone', remoteurl
, '--depth', '1',
140 self
.xfsprogs_repo_path
])
142 def get_admin_key(self
):
145 cp
= configparser
.ConfigParser()
146 cp
.read_string(self
.fs
.mon_manager
.raw_cluster_cmd(
147 'auth', 'get-or-create', 'client.admin'))
149 return cp
['client.admin']['key']
151 def get_test_and_scratch_dirs_ready(self
):
152 """ "test" and "scratch" directories are directories inside Ceph FS.
153 And, test and scratch mounts are path on the local FS where "test"
154 and "scratch" directories would be mounted. Look at xfstests-dev
155 local.config's template inside this file to get some context.
157 self
.test_dirname
= 'test'
158 self
.mount_a
.run_shell(['mkdir', self
.test_dirname
])
159 # read var name as "test dir's mount path"
160 self
.test_dirs_mount_path
= self
.mount_a
.client_remote
.mkdtemp(
161 suffix
=self
.test_dirname
)
163 self
.scratch_dirname
= 'scratch'
164 self
.mount_a
.run_shell(['mkdir', self
.scratch_dirname
])
165 # read var name as "scratch dir's mount path"
166 self
.scratch_dirs_mount_path
= self
.mount_a
.client_remote
.mkdtemp(
167 suffix
=self
.scratch_dirname
)
169 def install_deps(self
):
170 from teuthology
.misc
import get_system_type
172 distro
, version
= get_system_type(self
.mount_a
.client_remote
,
173 distro
=True, version
=True)
174 distro
= distro
.lower()
175 major_ver_num
= int(version
.split('.')[0]) # only keep major release
177 log
.info(f
'distro and version detected is "{distro}" and "{version}".')
179 # we keep fedora here so that right deps are installed when this test
180 # is run locally by a dev.
181 if distro
in ('redhatenterpriseserver', 'redhatenterprise', 'fedora',
182 'centos', 'centosstream', 'rhel'):
183 deps
= """acl attr automake bc dbench dump e2fsprogs fio \
184 gawk gcc indent libtool lvm2 make psmisc quota sed \
186 libacl-devel libattr-devel libaio-devel libuuid-devel \
187 xfsprogs-devel btrfs-progs-devel python2 sqlite""".split()
189 if self
.install_xfsprogs
:
190 deps
+= ['inih-devel', 'userspace-rcu-devel', 'libblkid-devel',
191 'gettext', 'libedit-devel', 'libattr-devel',
192 'device-mapper-devel', 'libicu-devel']
194 deps_old_distros
= ['xfsprogs-qa-devel']
196 if distro
!= 'fedora' and major_ver_num
> 7:
197 deps
.remove('btrfs-progs-devel')
199 args
= ['sudo', 'yum', 'install', '-y'] + deps
+ deps_old_distros
200 elif distro
== 'ubuntu':
201 deps
= """xfslibs-dev uuid-dev libtool-bin \
202 e2fsprogs automake gcc libuuid1 quota attr libattr1-dev make \
203 libacl1-dev libaio-dev xfsprogs libgdbm-dev gawk fio dbench \
204 uuid-runtime python sqlite3""".split()
206 if self
.install_xfsprogs
:
207 deps
+= ['libinih-dev', 'liburcu-dev', 'libblkid-dev',
208 'gettext', 'libedit-dev', 'libattr1-dev',
209 'libdevmapper-dev', 'libicu-dev', 'pkg-config']
211 if major_ver_num
>= 19:
212 deps
[deps
.index('python')] ='python2'
213 args
= ['sudo', 'apt-get', 'install', '-y'] + deps
215 raise RuntimeError('expected a yum based or a apt based system')
217 self
.mount_a
.client_remote
.run(args
=args
, omit_sudo
=False)
219 def create_reqd_users(self
):
220 self
.mount_a
.client_remote
.run(args
=['sudo', 'useradd', '-m', 'fsgqa'],
221 omit_sudo
=False, check_status
=False)
222 self
.mount_a
.client_remote
.run(args
=['sudo', 'groupadd', 'fsgqa'],
223 omit_sudo
=False, check_status
=False)
224 self
.mount_a
.client_remote
.run(args
=['sudo', 'useradd', 'fsgqa2'],
225 omit_sudo
=False, check_status
=False)
226 self
.mount_a
.client_remote
.run(args
=['sudo', 'useradd',
227 '123456-fsgqa'], omit_sudo
=False,
230 def write_local_config(self
, options
=None):
231 if isinstance(self
.mount_a
, KernelMount
):
232 conf_contents
= self
._gen
_conf
_for
_kernel
_mnt
(options
)
233 elif isinstance(self
.mount_a
, FuseMount
):
234 conf_contents
= self
._gen
_conf
_for
_fuse
_mnt
(options
)
236 self
.mount_a
.client_remote
.write_file(join(self
.xfstests_repo_path
,
238 conf_contents
, sudo
=True)
239 log
.info(f
'local.config\'s contents -\n{conf_contents}')
241 def _gen_conf_for_kernel_mnt(self
, options
=None):
243 Generate local.config for CephFS kernel client.
245 _options
= '' if not options
else ',' + options
246 mon_sock
= self
.fs
.mon_manager
.get_msgrv1_mon_socks()[0]
247 test_dev
= mon_sock
+ ':/' + self
.test_dirname
248 scratch_dev
= mon_sock
+ ':/' + self
.scratch_dirname
252 export TEST_DEV={test_dev}
253 export TEST_DIR={self.test_dirs_mount_path}
254 export SCRATCH_DEV={scratch_dev}
255 export SCRATCH_MNT={self.scratch_dirs_mount_path}
256 export CEPHFS_MOUNT_OPTIONS="-o name=admin,secret={self.get_admin_key()}{_options}"
259 def _gen_conf_for_fuse_mnt(self
, options
=None):
261 Generate local.config for CephFS FUSE client.
263 mon_sock
= self
.fs
.mon_manager
.get_msgrv1_mon_socks()[0]
264 test_dev
= 'ceph-fuse'
266 # XXX: Please note that ceph_fuse_bin_path is not ideally required
267 # because ceph-fuse binary ought to be present in one of the standard
268 # locations during teuthology tests. But then testing with
269 # vstart_runner.py will not work since ceph-fuse binary won't be
270 # present in a standard locations during these sessions. Thus, this
272 ceph_fuse_bin_path
= 'ceph-fuse' # bin expected to be in env
273 if 'LocalFuseMount' in str(type(self
.mount_a
)): # for vstart_runner.py runs
274 ceph_fuse_bin_path
= join(os_getcwd(), 'bin', 'ceph-fuse')
276 keyring_path
= self
.mount_a
.client_remote
.mktemp(
277 data
=self
.fs
.mon_manager
.get_keyring('client.admin')+'\n')
279 lastline
= (f
'export CEPHFS_MOUNT_OPTIONS="-m {mon_sock} -k '
280 f
'{keyring_path} --client_mountpoint /{self.test_dirname}')
281 lastline
+= f
'-o {options}"' if options
else '"'
284 export FSTYP=ceph-fuse
285 export CEPH_FUSE_BIN_PATH={ceph_fuse_bin_path}
286 export TEST_DEV={test_dev} # without this tests won't get started
287 export TEST_DIR={self.test_dirs_mount_path}
288 export SCRATCH_DEV={scratch_dev}
289 export SCRATCH_MNT={self.scratch_dirs_mount_path}
293 def write_ceph_exclude(self
):
294 # These tests will fail or take too much time and will
295 # make the test timedout, just skip them for now.
296 xfstests_exclude_contents
= dedent('''\
297 {c}/001 {g}/003 {g}/020 {g}/075 {g}/317 {g}/538 {g}/531
298 ''').format(g
="generic", c
="ceph")
300 self
.mount_a
.client_remote
.write_file(join(self
.xfstests_repo_path
, 'ceph.exclude'),
301 xfstests_exclude_contents
, sudo
=True)