]>
git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs_test_runner.py
5 from unittest
import suite
, loader
, case
6 from teuthology
.task
import interactive
7 from teuthology
import misc
8 from tasks
.cephfs
.filesystem
import Filesystem
, MDSCluster
, CephCluster
9 from tasks
.mgr
.mgr_test_case
import MgrCluster
# Module-level logger for this test-runner task.
log = logging.getLogger(__name__)
14 class DecoratingLoader(loader
.TestLoader
):
16 A specialization of TestLoader that tags some extra attributes
17 onto test classes as they are loaded.
19 def __init__(self
, params
):
21 super(DecoratingLoader
, self
).__init
__()
23 def _apply_params(self
, obj
):
24 for k
, v
in self
._params
.items():
25 if obj
.__class
__ is type:
31 def loadTestsFromTestCase(self
, testCaseClass
):
32 self
._apply
_params
(testCaseClass
)
33 return super(DecoratingLoader
, self
).loadTestsFromTestCase(testCaseClass
)
35 def loadTestsFromName(self
, name
, module
=None):
36 result
= super(DecoratingLoader
, self
).loadTestsFromName(name
, module
)
38 # Special case for when we were called with the name of a method, we get
39 # a suite with one TestCase
40 tests_in_result
= list(result
)
41 if len(tests_in_result
) == 1 and isinstance(tests_in_result
[0], case
.TestCase
):
42 self
._apply
_params
(tests_in_result
[0])
class LogStream(object):
    """
    A file-like sink that forwards complete lines written to it into the
    module logger, buffering any trailing partial line until its
    terminating newline arrives.
    """
    def __init__(self):
        # Holds output that has not yet been newline-terminated.
        # (Fix: the scraped original never initialized this, so the
        # first write() would raise AttributeError.)
        self.buffer = ""

    def write(self, data):
        # Fix: the scraped original never incorporated `data` into the
        # buffer, so nothing written could ever be logged.
        self.buffer += data
        if "\n" in self.buffer:
            lines = self.buffer.split("\n")
            logger = logging.getLogger(__name__)
            # Emit every complete line; retain the trailing partial one.
            for line in lines[:-1]:
                logger.info(line)
            self.buffer = lines[-1]

    def flush(self):
        # File-like API compatibility; there is nothing to flush since
        # complete lines are emitted eagerly in write().
        pass
class InteractiveFailureResult(unittest.TextTestResult):
    """
    Specialization that implements interactive-on-error style behavior:
    when a test fails or errors, log the traceback and then drop into
    an interactive teuthology shell so the cluster state can be
    inspected before teardown.
    """
    # Teuthology context handed to the interactive shell; assigned by
    # the caller before the run starts (presumably task() sets
    # `InteractiveFailureResult.ctx = ctx` — see the runner setup).
    ctx = None

    def addFailure(self, test, err):
        """Log the failure traceback, then go interactive."""
        log.error(self._exc_info_to_string(err, test))
        log.error("Failure in test '{0}', going interactive".format(
            self.getDescription(test)
        ))
        interactive.task(ctx=self.ctx, config=None)

    def addError(self, test, err):
        """Log the error traceback, then go interactive."""
        log.error(self._exc_info_to_string(err, test))
        log.error("Error in test '{0}', going interactive".format(
            self.getDescription(test)
        ))
        interactive.task(ctx=self.ctx, config=None)
85 @contextlib.contextmanager
86 def task(ctx
, config
):
88 Run the CephFS test cases.
90 Run everything in tasks/cephfs/test_*.py:
100 `modules` argument allows running only some specific modules:
106 - cephfs_test_runner:
108 - tasks.cephfs.test_sessionmap
109 - tasks.cephfs.test_auto_repair
111 By default, any cases that can't be run on the current cluster configuration
112 will generate a failure. When the optional `fail_on_skip` argument is set
113 to false, any tests that can't be run on the current configuration will
119 - cephfs_test_runner:
124 ceph_cluster
= CephCluster(ctx
)
126 if len(list(misc
.all_roles_of_type(ctx
.cluster
, 'mds'))):
127 mds_cluster
= MDSCluster(ctx
)
133 if len(list(misc
.all_roles_of_type(ctx
.cluster
, 'mgr'))):
134 mgr_cluster
= MgrCluster(ctx
)
138 # Mount objects, sorted by ID
139 if hasattr(ctx
, 'mounts'):
140 mounts
= [v
for k
, v
in sorted(ctx
.mounts
.items(), key
=lambda mount
: mount
[0])]
142 # The test configuration has a filesystem but no fuse/kclient mounts
145 decorating_loader
= DecoratingLoader({
149 "ceph_cluster": ceph_cluster
,
150 "mds_cluster": mds_cluster
,
151 "mgr_cluster": mgr_cluster
,
154 fail_on_skip
= config
.get('fail_on_skip', True)
156 # Put useful things onto ctx for interactive debugging
158 ctx
.mds_cluster
= mds_cluster
159 ctx
.mgr_cluster
= mgr_cluster
161 # Depending on config, either load specific modules, or scan for moduless
162 if config
and 'modules' in config
and config
['modules']:
164 for mod_name
in config
['modules']:
165 # Test names like cephfs.test_auto_repair
166 module_suites
.append(decorating_loader
.loadTestsFromName(mod_name
))
167 overall_suite
= suite
.TestSuite(module_suites
)
169 # Default, run all tests
170 overall_suite
= decorating_loader
.discover(
172 os
.path
.dirname(os
.path
.abspath(__file__
)),
177 if ctx
.config
.get("interactive-on-error", False):
178 InteractiveFailureResult
.ctx
= ctx
179 result_class
= InteractiveFailureResult
181 result_class
= unittest
.TextTestResult
183 class LoggingResult(result_class
):
184 def startTest(self
, test
):
185 log
.info("Starting test: {0}".format(self
.getDescription(test
)))
186 return super(LoggingResult
, self
).startTest(test
)
188 def addSkip(self
, test
, reason
):
190 # Don't just call addFailure because that requires a traceback
191 self
.failures
.append((test
, reason
))
193 super(LoggingResult
, self
).addSkip(test
, reason
)
196 result
= unittest
.TextTestRunner(
198 resultclass
=LoggingResult
,
200 failfast
=True).run(overall_suite
)
202 if not result
.wasSuccessful():
203 result
.printErrors() # duplicate output at end for convenience
206 for test
, error
in result
.errors
:
207 bad_tests
.append(str(test
))
208 for test
, failure
in result
.failures
:
209 bad_tests
.append(str(test
))
211 raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests
)))