]> git.proxmox.com Git - rustc.git/blob - src/llvm/utils/lit/lit/Test.py
Imported Upstream version 1.0.0~0alpha
[rustc.git] / src / llvm / utils / lit / lit / Test.py
1 import os
2
3 # Test result codes.
4
class ResultCode(object):
    """Test result codes."""

    # Interning table: exactly one canonical instance per code name. We
    # override __new__ and __getnewargs__ so that unpickling also funnels
    # through this table and preserves instance identity.
    _instances = {}

    def __new__(cls, name, isFailure):
        # Hand back the cached instance for this name when there is one;
        # otherwise allocate a fresh object and remember it.
        cached = cls._instances.get(name)
        if cached is not None:
            return cached
        fresh = super(ResultCode, cls).__new__(cls)
        cls._instances[name] = fresh
        return fresh

    def __getnewargs__(self):
        # Pickle support: replay the original constructor arguments through
        # __new__ on load, keeping the one-instance-per-name guarantee.
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        self.name = name
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))
26
# The canonical result-code singletons (interned by ResultCode.__new__).
PASS = ResultCode('PASS', False)                # test succeeded
XFAIL = ResultCode('XFAIL', False)              # expected failure; not a failure
FAIL = ResultCode('FAIL', True)                 # test failed
XPASS = ResultCode('XPASS', True)               # unexpected pass; counts as failure
UNRESOLVED = ResultCode('UNRESOLVED', True)     # harness could not determine a result
UNSUPPORTED = ResultCode('UNSUPPORTED', False)  # test does not apply in this config
33
34 # Test metric values.
35
class MetricValue(object):
    """Abstract base class for a single metric value reported by a test."""

    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        # NotImplementedError is the idiomatic marker for an abstract method.
        # It is a subclass of RuntimeError, so callers catching the old
        # exception type still work.
        raise NotImplementedError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise NotImplementedError("abstract method")
54
class IntMetricValue(MetricValue):
    """An integer-valued test metric."""

    def __init__(self, value):
        # The raw integer value of the metric.
        self.value = value

    def format(self):
        # Default integer rendering is already console-friendly.
        return str(self.value)

    def todata(self):
        # Integers serialize to JSON natively; return the value unchanged.
        return self.value
64
class RealMetricValue(MetricValue):
    """A floating-point test metric."""

    def __init__(self, value):
        # The raw floating-point value of the metric.
        self.value = value

    def format(self):
        # Fixed four-decimal rendering for stable console output.
        return '%.4f' % self.value

    def todata(self):
        # Floats serialize to JSON natively; return the value unchanged.
        return self.value
74
75 # Test results.
76
class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The ResultCode describing the outcome.
        self.code = code
        # The captured test output text.
        self.output = output
        # Wall-clock time spent executing the test, when timing was enabled.
        self.elapsed = elapsed
        # Metrics attached via addMetric(), keyed by metric name.
        self.metrics = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value. Attaching a metric under an already-used name is an error.

        The value must be an instance of a MetricValue subclass.
        """
        # Reject duplicates first so a bad re-registration never clobbers
        # an existing metric.
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value
106
107 # Test classes.
108
class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        # Identifying name of the suite.
        self.name = name
        # Root directory containing the test sources.
        self.source_root = source_root
        # Root directory where tests execute and write their outputs.
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        """Return the source-tree path formed by joining the components."""
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        """Return the execution-tree path formed by joining the components."""
        return os.path.join(self.exec_root, *components)
127
class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path = None):
        # The owning TestSuite.
        self.suite = suite
        # Sequence of path components identifying the test within the suite.
        self.path_in_suite = path_in_suite
        # The test's local configuration.
        self.config = config
        # Optional explicit path to the test file; overrides the source path.
        self.file_path = file_path
        # A list of conditions under which this test is expected to fail. These
        # can optionally be provided by test format handlers, and will be
        # honored when the test result is supplied.
        self.xfails = []
        # The test result, once complete.
        self.result = None

    def setResult(self, result):
        """Record the result of this test, applying XFAIL/XPASS resolution.

        Raises ValueError if a result was already set or if the argument
        is not a Result instance.
        """
        # BUGFIX: the original raised the undefined name `ArgumentError`,
        # which itself produced a NameError at runtime; raise ValueError
        # with the intended messages instead.
        if self.result is not None:
            raise ValueError("test result already set")
        if not isinstance(result, Result):
            raise ValueError("unexpected result type")

        self.result = result

        # Apply the XFAIL handling to resolve the result exit code.
        if self.isExpectedToFail():
            if self.result.code == PASS:
                self.result.code = XPASS
            elif self.result.code == FAIL:
                self.result.code = XFAIL

    def getFullName(self):
        """Return the fully qualified name, e.g. 'suite :: dir/test'."""
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        """Return the path of the test file, preferring an explicit override."""
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        """Return the test's path inside the suite's source tree."""
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        """Return the test's path inside the suite's execution tree."""
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test xfails property which by
        some test formats may not be computed until the test has first been
        executed.
        """

        # Check if any of the xfails match an available feature or the target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is an exact match for one of the features, it fails.
            if item in self.config.available_features:
                return True

            # If this is a part of the target triple, it fails.
            # (Deliberately a substring match, not an exact component match.)
            if item in self.suite.config.target_triple:
                return True

        return False