# ceph/qa/tasks/cephfs_test_runner.py — teuthology task that runs the CephFS test cases.
import contextlib
import logging
import os
import unittest
from unittest import suite, loader, case

from teuthology import misc
from teuthology.task import interactive

from tasks.cephfs.filesystem import Filesystem, MDSCluster, CephCluster
from tasks.mgr.mgr_test_case import MgrCluster
# Module-level logger for this task module.
log = logging.getLogger(__name__)
14 class DecoratingLoader(loader
.TestLoader
):
16 A specialization of TestLoader that tags some extra attributes
17 onto test classes as they are loaded.
19 def __init__(self
, params
):
21 super(DecoratingLoader
, self
).__init
__()
23 def _apply_params(self
, obj
):
24 for k
, v
in self
._params
.items():
27 def loadTestsFromTestCase(self
, testCaseClass
):
28 self
._apply
_params
(testCaseClass
)
29 return super(DecoratingLoader
, self
).loadTestsFromTestCase(testCaseClass
)
31 def loadTestsFromName(self
, name
, module
=None):
32 result
= super(DecoratingLoader
, self
).loadTestsFromName(name
, module
)
34 # Special case for when we were called with the name of a method, we get
35 # a suite with one TestCase
36 tests_in_result
= list(result
)
37 if len(tests_in_result
) == 1 and isinstance(tests_in_result
[0], case
.TestCase
):
38 self
._apply
_params
(tests_in_result
[0])
class LogStream(object):
    """
    A file-like stream that forwards complete lines to the module
    logger, buffering any trailing partial line until it is completed
    by a later write.
    """
    def __init__(self):
        # NOTE(review): extraction dropped __init__; reconstructed from the
        # `self.buffer` reads/writes in write() below.
        self.buffer = ""

    def write(self, data):
        self.buffer += data
        if "\n" in self.buffer:
            lines = self.buffer.split("\n")
            # Emit every complete line; keep the trailing partial line buffered.
            for line in lines[:-1]:
                log.info(line)
            self.buffer = lines[-1]

    def flush(self):
        # NOTE(review): no-op flush reconstructed — TextTestRunner expects a
        # flushable stream; confirm against upstream.
        pass
class InteractiveFailureResult(unittest.TextTestResult):
    """
    Specialization that implements interactive-on-error style
    behavior: on a test failure or error, log the traceback and then
    drop into an interactive teuthology shell before continuing.
    """
    # NOTE(review): `ctx` is assigned onto this class by task() before any
    # test runs — confirm no other assignment path exists.

    def addFailure(self, test, err):
        # NOTE(review): no super().addFailure() call is visible in the
        # original, so failures reaching this path are not recorded on the
        # result object — confirm that is intentional.
        log.error(self._exc_info_to_string(err, test))
        log.error("Failure in test '{0}', going interactive".format(
            self.getDescription(test)
        ))
        interactive.task(ctx=self.ctx, config=None)

    def addError(self, test, err):
        log.error(self._exc_info_to_string(err, test))
        log.error("Error in test '{0}', going interactive".format(
            self.getDescription(test)
        ))
        interactive.task(ctx=self.ctx, config=None)
@contextlib.contextmanager
def task(ctx, config):
    """
    Run the CephFS test cases.

    Run everything in tasks/cephfs/test_*.py:

        tasks:
          - ceph:
          - ceph-fuse:
          - cephfs_test_runner:

    The `modules` argument allows running only some specific modules:

        tasks:
          ...
          - cephfs_test_runner:
              modules:
                - tasks.cephfs.test_sessionmap
                - tasks.cephfs.test_auto_repair

    By default, any cases that can't be run on the current cluster configuration
    will generate a failure.  When the optional `fail_on_skip` argument is set
    to false, any tests that can't be run on the current configuration will
    simply be skipped:

        tasks:
          ...
          - cephfs_test_runner:
              fail_on_skip: false

    NOTE(review): the source of this function was garbled by extraction;
    several lines (the `else:` branches, the discover() path, the trailing
    `yield`) are reconstructed from the surrounding fragments — confirm
    against upstream before relying on exact behavior.
    """
    ceph_cluster = CephCluster(ctx)

    if len(list(misc.all_roles_of_type(ctx.cluster, 'mds'))):
        mds_cluster = MDSCluster(ctx)
        fs = Filesystem(ctx)
    else:
        mds_cluster = None
        fs = None

    if len(list(misc.all_roles_of_type(ctx.cluster, 'mgr'))):
        mgr_cluster = MgrCluster(ctx)
    else:
        mgr_cluster = None

    # Mount objects, sorted by ID
    if hasattr(ctx, 'mounts'):
        # FIX: the original passed a Python 2 cmp-style lambda positionally to
        # sorted(), which is a TypeError on Python 3.  Sorting the items by
        # key is equivalent: mount IDs are unique dict keys, so the values are
        # never compared.
        mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda kv: kv[0])]
    else:
        # The test configuration has a filesystem but no fuse/kclient mounts
        mounts = []

    decorating_loader = DecoratingLoader({
        "ctx": ctx,
        "mounts": mounts,
        "fs": fs,
        "ceph_cluster": ceph_cluster,
        "mds_cluster": mds_cluster,
        "mgr_cluster": mgr_cluster,
    })

    fail_on_skip = config.get('fail_on_skip', True)

    # Put useful things onto ctx for interactive debugging
    ctx.fs = fs
    ctx.mds_cluster = mds_cluster
    ctx.mgr_cluster = mgr_cluster

    # Depending on config, either load specific modules, or scan for moduless
    if config and 'modules' in config and config['modules']:
        module_suites = []
        for mod_name in config['modules']:
            # Test names like cephfs.test_auto_repair
            module_suites.append(decorating_loader.loadTestsFromName(mod_name))
        overall_suite = suite.TestSuite(module_suites)
    else:
        # Default, run all tests
        # NOTE(review): discover path reconstructed ("cephfs/" under this
        # file's directory) — confirm against upstream.
        overall_suite = decorating_loader.discover(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "cephfs/"
            )
        )

    if ctx.config.get("interactive-on-error", False):
        InteractiveFailureResult.ctx = ctx
        result_class = InteractiveFailureResult
    else:
        result_class = unittest.TextTestResult

    class LoggingResult(result_class):
        # Logs each test as it starts; optionally converts skips to failures.
        def startTest(self, test):
            log.info("Starting test: {0}".format(self.getDescription(test)))
            return super(LoggingResult, self).startTest(test)

        def addSkip(self, test, reason):
            if fail_on_skip:
                # Don't just call addFailure because that requires a traceback
                self.failures.append((test, reason))
            else:
                super(LoggingResult, self).addSkip(test, reason)

    # Execute the suite; failfast stops at the first failure.
    result = unittest.TextTestRunner(
        stream=LogStream(),
        resultclass=LoggingResult,
        verbosity=2,
        failfast=True).run(overall_suite)

    if not result.wasSuccessful():
        result.printErrors()  # duplicate output at end for convenience

        bad_tests = []
        for test, error in result.errors:
            bad_tests.append(str(test))
        for test, failure in result.failures:
            bad_tests.append(str(test))

        raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests)))

    yield